Nov 22 09:03:23 crc systemd[1]: Starting Kubernetes Kubelet...
Nov 22 09:03:23 crc restorecon[4558]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11
Nov 22 09:03:23 crc restorecon[4558]:
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc 
restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 22 09:03:23 crc 
restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 
09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 22 09:03:23 crc 
restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 
09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 22 09:03:23 crc restorecon[4558]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Nov 22 09:03:24 crc kubenswrapper[4693]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.021630 4693 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026283 4693 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026301 4693 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026307 4693 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026311 4693 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026315 4693 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026319 4693 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026323 4693 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026328 4693 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026333 4693 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026337 4693 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026340 4693 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026343 4693 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026346 4693 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026350 4693 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026353 4693 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026356 4693 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026359 4693 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026363 4693 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026367 4693 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026370 4693 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026373 4693 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026377 4693 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026381 4693 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026385 4693 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026389 4693 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026394 4693 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026397 4693 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026400 4693 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026404 4693 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026407 4693 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026411 4693 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026414 4693 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026418 4693 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026422 4693 feature_gate.go:330] unrecognized feature gate: NewOLM
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026425 4693 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026435 4693 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026439 4693 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026442 4693 feature_gate.go:330] unrecognized feature gate: PinnedImages
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026446 4693 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026449 4693 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026454 4693 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026458 4693 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026461 4693 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026465 4693 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026468 4693 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026471 4693 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026475 4693 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026478 4693 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026481 4693 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026485 4693 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026488 4693 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026491 4693 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026494 4693 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026498 4693 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026501 4693 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026505 4693 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026509 4693 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026512 4693 feature_gate.go:330] unrecognized feature gate: OVNObservability
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026516 4693 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026521 4693 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026525 4693 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026529 4693 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026533 4693 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026537 4693 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026540 4693 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026544 4693 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026548 4693 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026552 4693 feature_gate.go:330] unrecognized feature gate: Example
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026557 4693 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026562 4693 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.026567 4693 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
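[editor's note] Every feature_gate.go:330 line above is the gate parser skipping a name it does not know: OpenShift ships cluster-level gates (OnClusterBuild, GatewayAPI, and so on) that the upstream kubelet's registry has never heard of, so they are warned about and ignored, while recognized gates that are deprecated or already GA produce the "Setting ... feature gate" notices instead. A simplified stand-in for that warn-and-skip logic, not the actual k8s.io/component-base/featuregate implementation:

// Sketch of the warn-and-skip behavior behind the feature_gate.go:330
// lines: unknown gate names are logged and ignored; known-but-locked
// gates emit a notice. Registry entries here are examples drawn from
// this log, not the real gate table.
package main

import "log"

type gateState struct {
	known      bool
	deprecated bool
	ga         bool
}

var registry = map[string]gateState{
	"KMSv1":                 {known: true, deprecated: true},
	"CloudDualStackNodeIPs": {known: true, ga: true},
	"NodeSwap":              {known: true},
}

func setGates(requested map[string]bool) map[string]bool {
	effective := map[string]bool{}
	for name, val := range requested {
		st, ok := registry[name]
		if !ok {
			// mirrors "unrecognized feature gate: <name>"
			log.Printf("W unrecognized feature gate: %s", name)
			continue
		}
		switch {
		case st.deprecated:
			log.Printf("W Setting deprecated feature gate %s=%v. It will be removed in a future release.", name, val)
		case st.ga:
			log.Printf("W Setting GA feature gate %s=%v. It will be removed in a future release.", name, val)
		}
		effective[name] = val
	}
	return effective
}

func main() {
	setGates(map[string]bool{"OnClusterBuild": true, "KMSv1": true, "CloudDualStackNodeIPs": true})
}

This also explains why the same warnings recur several times in one startup: the gate set is re-applied for each component that consumes it, and each pass re-logs the unknown names.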
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027317 4693 flags.go:64] FLAG: --address="0.0.0.0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027335 4693 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027344 4693 flags.go:64] FLAG: --anonymous-auth="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027349 4693 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027354 4693 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027359 4693 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027364 4693 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027369 4693 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027373 4693 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027378 4693 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027382 4693 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027387 4693 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027391 4693 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027395 4693 flags.go:64] FLAG: --cgroup-root=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027400 4693 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027404 4693 flags.go:64] FLAG: --client-ca-file=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027408 4693 flags.go:64] FLAG: --cloud-config=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027412 4693 flags.go:64] FLAG: --cloud-provider=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027416 4693 flags.go:64] FLAG: --cluster-dns="[]"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027420 4693 flags.go:64] FLAG: --cluster-domain=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027424 4693 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027428 4693 flags.go:64] FLAG: --config-dir=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027432 4693 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027437 4693 flags.go:64] FLAG: --container-log-max-files="5"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027441 4693 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027445 4693 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027450 4693 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027454 4693 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027458 4693 flags.go:64] FLAG: --contention-profiling="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027462 4693 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027467 4693 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027470 4693 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027474 4693 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027479 4693 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027482 4693 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027486 4693 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027490 4693 flags.go:64] FLAG: --enable-load-reader="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027493 4693 flags.go:64] FLAG: --enable-server="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027497 4693 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027503 4693 flags.go:64] FLAG: --event-burst="100"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027507 4693 flags.go:64] FLAG: --event-qps="50"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027512 4693 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027516 4693 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027520 4693 flags.go:64] FLAG: --eviction-hard=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027524 4693 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027528 4693 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027532 4693 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027537 4693 flags.go:64] FLAG: --eviction-soft=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027540 4693 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027544 4693 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027548 4693 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027552 4693 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027555 4693 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027559 4693 flags.go:64] FLAG: --fail-swap-on="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027563 4693 flags.go:64] FLAG: --feature-gates=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027568 4693 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027572 4693 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027576 4693 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027580 4693 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027584 4693 flags.go:64] FLAG: --healthz-port="10248"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027588 4693 flags.go:64] FLAG: --help="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027592 4693 flags.go:64] FLAG: --hostname-override=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027596 4693 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027599 4693 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027603 4693 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027606 4693 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027610 4693 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027614 4693 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027617 4693 flags.go:64] FLAG: --image-service-endpoint=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027621 4693 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027624 4693 flags.go:64] FLAG: --kube-api-burst="100"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027628 4693 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027632 4693 flags.go:64] FLAG: --kube-api-qps="50"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027635 4693 flags.go:64] FLAG: --kube-reserved=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027639 4693 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027643 4693 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027646 4693 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027650 4693 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027654 4693 flags.go:64] FLAG: --lock-file=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027658 4693 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027662 4693 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027667 4693 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027672 4693 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027678 4693 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027682 4693 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027686 4693 flags.go:64] FLAG: --logging-format="text"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027690 4693 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027694 4693 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027699 4693 flags.go:64] FLAG: --manifest-url=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027702 4693 flags.go:64] FLAG: --manifest-url-header=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027707 4693 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027713 4693 flags.go:64] FLAG: --max-open-files="1000000"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027718 4693 flags.go:64] FLAG: --max-pods="110"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027722 4693 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027725 4693 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027729 4693 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027732 4693 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027737 4693 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027741 4693 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027745 4693 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027754 4693 flags.go:64] FLAG: --node-status-max-images="50"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027758 4693 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027762 4693 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027766 4693 flags.go:64] FLAG: --pod-cidr=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027770 4693 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027776 4693 flags.go:64] FLAG: --pod-manifest-path=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027780 4693 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027783 4693 flags.go:64] FLAG: --pods-per-core="0"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027787 4693 flags.go:64] FLAG: --port="10250"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027791 4693 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027795 4693 flags.go:64] FLAG: --provider-id=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027798 4693 flags.go:64] FLAG: --qos-reserved=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027802 4693 flags.go:64] FLAG: --read-only-port="10255"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027806 4693 flags.go:64] FLAG: --register-node="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027809 4693 flags.go:64] FLAG: --register-schedulable="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027813 4693 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027819 4693 flags.go:64] FLAG: --registry-burst="10"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027822 4693 flags.go:64] FLAG: --registry-qps="5"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027826 4693 flags.go:64] FLAG: --reserved-cpus=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027831 4693 flags.go:64] FLAG: --reserved-memory=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027835 4693 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027852 4693 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027857 4693 flags.go:64] FLAG: --rotate-certificates="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027863 4693 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027866 4693 flags.go:64] FLAG: --runonce="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027873 4693 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027877 4693 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027881 4693 flags.go:64] FLAG: --seccomp-default="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027885 4693 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027890 4693 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027894 4693 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027898 4693 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027902 4693 flags.go:64] FLAG: --storage-driver-password="root"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027906 4693 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027910 4693 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027914 4693 flags.go:64] FLAG: --storage-driver-user="root"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027918 4693 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027922 4693 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027926 4693 flags.go:64] FLAG: --system-cgroups=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027930 4693 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027937 4693 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027941 4693 flags.go:64] FLAG: --tls-cert-file=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027945 4693 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027951 4693 flags.go:64] FLAG: --tls-min-version=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027955 4693 flags.go:64] FLAG: --tls-private-key-file=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027960 4693 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027963 4693 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027967 4693 flags.go:64] FLAG: --topology-manager-scope="container"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027971 4693 flags.go:64] FLAG: --v="2"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027983 4693 flags.go:64] FLAG: --version="false"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027988 4693 flags.go:64] FLAG: --vmodule=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027992 4693 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.027997 4693 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
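[editor's note] The flags.go:64 dump above records the kubelet's entire effective flag set, which makes logs like this one handy for postmortems. A small sketch that scrapes those lines back into a map; the regular expression is an assumption based purely on the format shown above:

// Sketch: recover the effective kubelet flag set from a log like this
// one by scraping the flags.go "FLAG: --name=\"value\"" lines on stdin.
package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches e.g.: flags.go:64] FLAG: --node-ip="192.168.126.11"
var flagLine = regexp.MustCompile(`flags\.go:\d+\] FLAG: (--[\w.-]+)="(.*)"`)

func main() {
	flags := map[string]string{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // log lines can be very long
	for sc.Scan() {
		if m := flagLine.FindStringSubmatch(sc.Text()); m != nil {
			flags[m[1]] = m[2]
		}
	}
	fmt.Printf("parsed %d flags; node-ip=%q\n", len(flags), flags["--node-ip"])
}

Invoked, say, as go run parseflags.go < kubelet.log (the file name is illustrative), it would report --node-ip="192.168.126.11" for this node.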
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.028364 4693 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.034539 4693 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.034570 4693 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.035494 4693 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.037991 4693 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.038064 4693 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
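[editor's note] The certificate_manager lines just below report a certificate expiration of 2026-02-24 05:52:08 UTC but a rotation deadline of 2025-12-15, months earlier. As I recall client-go's certificate manager, the deadline is jittered to a random point in roughly the last 10-30% of the certificate's validity so a fleet does not rotate all at once; both that window and the one-year validity below are assumptions, only the dates come from this log.

// Sketch of how a rotation deadline like the one logged below can arise:
// deadline = notBefore + (notAfter - notBefore) * (0.7 + 0.2*rand),
// i.e. a jittered point at 70-90% of the cert's lifetime (assumed window).
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	total := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(total) * (0.7 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 52, 8, 0, time.UTC) // expiration from the log
	notBefore := notAfter.AddDate(-1, 0, 0)                   // assumed one-year validity
	deadline := rotationDeadline(notBefore, notAfter)
	// The printed wait differs from the log's 559h33m42s because the
	// jitter is random and "now" is not 09:03:24 on Nov 22.
	fmt.Println("rotation deadline:", deadline, "wait:", time.Until(deadline).Round(time.Second))
}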
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.038790 4693 server.go:997] "Starting client certificate rotation"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.038819 4693 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.039225 4693 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-15 16:37:06.046370747 +0000 UTC
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.039313 4693 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 559h33m42.00706067s for next certificate rotation
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.048827 4693 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.050188 4693 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.060761 4693 log.go:25] "Validated CRI v1 runtime API"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.077371 4693 log.go:25] "Validated CRI v1 image API"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.078413 4693 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.081186 4693 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-22-09-00-15-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.081212 4693 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:49 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm:{mountpoint:/var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm major:0 minor:42 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:50 fsType:tmpfs blockSize:0} overlay_0-43:{mountpoint:/var/lib/containers/storage/overlay/94b752e0a51c0134b00ddef6dc7a933a9d7c1d9bdc88a18dae4192a0d557d623/merged major:0 minor:43 fsType:overlay blockSize:0}]
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.094521 4693 manager.go:217] Machine: {Timestamp:2025-11-22 09:03:24.093144903 +0000 UTC m=+0.235647214 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2445406 MemoryCapacity:33654120448 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:15f0b692-3998-4879-8228-aabd9ff9e80a BootID:e67c00bb-ce74-4d2e-879d-8186ebb300ff Filesystems:[{Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827060224 Type:vfs Inodes:1048576 HasInodes:true} {Device:/var/lib/containers/storage/overlay-containers/75d81934760b26101869fbd8e4b5954c62b019c1cc3e5a0c9f82ed8de46b3b22/userdata/shm DeviceMajor:0 DeviceMinor:42 Capacity:65536000 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:49 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:overlay_0-43 DeviceMajor:0 DeviceMinor:43 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:50 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:95:2a:77 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:enp3s0 MacAddress:fa:16:3e:95:2a:77 Speed:-1 Mtu:1500} {Name:enp7s0 MacAddress:fa:16:3e:01:2e:b1 Speed:-1 Mtu:1440} {Name:enp7s0.20 MacAddress:52:54:00:6e:25:20 Speed:-1 Mtu:1436} {Name:enp7s0.21 MacAddress:52:54:00:8a:17:57 Speed:-1 Mtu:1436} {Name:enp7s0.22 MacAddress:52:54:00:3b:f4:59 Speed:-1 Mtu:1436} {Name:eth10 MacAddress:32:ec:4a:00:39:d8 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:9e:11:3d:b5:74:24 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654120448 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:65536 Type:Data Level:1} {Id:0 Size:65536 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:65536 Type:Data Level:1} {Id:1 Size:65536 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:65536 Type:Data Level:1} {Id:10 Size:65536 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:65536 Type:Data Level:1} {Id:11 Size:65536 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:65536 Type:Data Level:1} {Id:2 Size:65536 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:65536 Type:Data Level:1} {Id:3 Size:65536 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:65536 Type:Data Level:1} {Id:4 Size:65536 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:65536 Type:Data Level:1} {Id:5 Size:65536 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:65536 Type:Data Level:1} {Id:6 Size:65536 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:65536 Type:Data Level:1} {Id:7 Size:65536 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:65536 Type:Data Level:1} {Id:8 Size:65536 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:65536 Type:Data Level:1} {Id:9 Size:65536 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.094674 4693 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.094887 4693 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.095127 4693 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.095272 4693 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.095295 4693 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.095716 4693 topology_manager.go:138] "Creating topology manager with none policy"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.095735 4693 container_manager_linux.go:303] "Creating device plugin manager"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.096137 4693 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.096157 4693 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.096486 4693 state_mem.go:36] "Initialized new in-memory state store"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.096561 4693 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.098914 4693 kubelet.go:418] "Attempting to sync node with API server"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.098933 4693 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.098947 4693 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.098956 4693 kubelet.go:324] "Adding apiserver pod source"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.098965 4693 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.100645 4693 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.101156 4693 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.102189 4693 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.102575 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.102643 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError"
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.102576 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.102684 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103108 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103127 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103133 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103140 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103150 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103156 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103162 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103171 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103178 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103185 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103202 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103208 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.103656 4693 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.104005 4693 server.go:1280] "Started kubelet"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.104530 4693 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.104749 4693 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.105023 4693 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Nov 22 09:03:24 crc systemd[1]: Started Kubernetes Kubelet.
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.105141 4693 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.107368 4693 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.107393 4693 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.107447 4693 server.go:460] "Adding debug handlers to kubelet server"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.107755 4693 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 16:07:38.179815066 +0000 UTC
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.108464 4693 volume_manager.go:287] "The desired_state_of_world populator starts"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.108690 4693 volume_manager.go:289] "Starting Kubelet Volume Manager"
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.108815 4693 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.109205 4693 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.107578 4693 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 192.168.25.249:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187a48c275ab0efd default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-22 09:03:24.103970557 +0000 UTC m=+0.246472849,LastTimestamp:2025-11-22 09:03:24.103970557 +0000 UTC m=+0.246472849,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.109938 4693 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.110058 4693 factory.go:55] Registering systemd factory
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.110075 4693 factory.go:221] Registration of the systemd container factory successfully
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.110948 4693 factory.go:153] Registering CRI-O factory
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.110964 4693 factory.go:221] Registration of the crio container factory successfully
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.110990 4693 factory.go:103] Registering Raw factory
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.111023 4693 manager.go:1196] Started watching for new ooms in manager
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.112135 4693 manager.go:319] Starting recovery of all containers
Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.112237 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.112801 4693 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.249:6443: connect: connection refused" interval="200ms"
Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.113633 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError"
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118644 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118689 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118702 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118713 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118725 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118736 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118750 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118761 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118773 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118784 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118794 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118806 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118816 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118828 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118837 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118870 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118894 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118906 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118914 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118923 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118932 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118948 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118956 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118964 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118984 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.118994 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119019 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119030 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119041 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119051 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119059 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119069 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119079 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119088 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119097 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119106 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119116 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119123 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119133 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119143 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119152 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119162 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119171 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119181 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119192 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119202 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119210 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119220 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119228 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119238 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119247 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119256 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119267 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119281 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119290 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119299 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119308 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119317 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119326 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119337 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119346 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119355 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119363 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119373 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119382 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119392 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119401 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119412 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119421 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119429 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119439 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119450 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119460 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119469 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119478 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119487 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119496 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119505 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119513 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119523 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119532 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119539 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119547 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119558 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119566 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119574 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119583 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119591 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119600 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119608 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119616 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119625 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119634 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119644 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119651 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119662 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119674 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119684 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119692 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119701 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119711 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119720 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119729 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119753 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119766 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119776 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119786 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119796 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119806 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119816 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119826 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119836 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119862 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119875 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119884 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119892 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119902 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119912 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119921 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119929 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119938 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119947 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119955 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119963 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119972 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.119995 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120004 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120014 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120025 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120034 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120043 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120051 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120059 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120067 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120077 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120086 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120095 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120104 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext=""
Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120113 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state"
pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120121 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120130 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120139 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120149 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120158 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120168 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120177 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120185 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120193 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120202 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120212 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" 
volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120222 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120231 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120240 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120250 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120260 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120270 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120279 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120290 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120300 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120310 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120323 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" 
volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120331 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120339 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120348 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120357 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120367 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120375 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120385 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120394 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120402 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120411 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120419 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" 
volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120426 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120434 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120443 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120453 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120463 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120471 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120479 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120487 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120495 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120503 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120512 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" 
volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120521 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120530 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120538 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120546 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120555 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120563 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120572 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120625 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120636 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120646 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120657 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" 
volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120666 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120677 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.120685 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123497 4693 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123525 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123535 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123545 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123556 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123566 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123577 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123587 4693 reconstruct.go:130] 
"Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123597 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123608 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123618 4693 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123627 4693 reconstruct.go:97] "Volume reconstruction finished" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.123633 4693 reconciler.go:26] "Reconciler: start to sync state" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.128874 4693 manager.go:324] Recovery completed Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.143278 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.144037 4693 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145498 4693 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145529 4693 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145561 4693 kubelet.go:2335] "Starting kubelet main sync loop" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145629 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.145640 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.145601 4693 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.146710 4693 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.146730 4693 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.146748 4693 state_mem.go:36] "Initialized new in-memory state store" Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.147190 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.147256 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.150691 4693 policy_none.go:49] "None policy: Start" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.151341 4693 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.151363 4693 state_mem.go:35] "Initializing new in-memory state store" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.204543 4693 manager.go:334] "Starting Device Plugin manager" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.204580 4693 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.204600 4693 server.go:79] "Starting device plugin registration server" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.204938 4693 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.204969 4693 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.205174 4693 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.205247 4693 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 22 
09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.205259 4693 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.210461 4693 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.245785 4693 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.245926 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.246953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247016 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247184 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247448 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247490 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247811 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247836 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247866 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.247961 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248134 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248169 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248276 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248292 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248586 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248609 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248621 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248724 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248797 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248826 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248838 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248837 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.248910 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249290 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249403 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249542 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249550 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249597 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249900 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249925 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.249936 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250071 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250094 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250457 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250495 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250587 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250607 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.250615 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.305234 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.306219 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.306243 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.306254 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.306274 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.307268 4693 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.249:6443: connect: connection refused" node="crc" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.313778 4693 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.249:6443: connect: connection refused" interval="400ms" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325291 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325319 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc 
kubenswrapper[4693]: I1122 09:03:24.325341 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325357 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325384 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325398 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325431 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325469 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325486 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325508 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325562 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325598 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325624 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.325644 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426300 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426336 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426355 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426374 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426386 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426402 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " 
pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426417 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426432 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426425 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426452 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426469 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426444 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426498 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426505 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426528 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426537 4693 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426547 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426540 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426560 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426593 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426613 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426640 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426648 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426693 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426712 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426740 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426751 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426772 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426795 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.426918 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.507473 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.509116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.509159 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.509192 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.509225 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.510099 4693 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.249:6443: connect: connection refused" node="crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.576913 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.580733 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.599798 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-443cb8d6a2866cf3d6f6269585797efb218cc6388ddd240a1576d49855262dd3 WatchSource:0}: Error finding container 443cb8d6a2866cf3d6f6269585797efb218cc6388ddd240a1576d49855262dd3: Status 404 returned error can't find the container with id 443cb8d6a2866cf3d6f6269585797efb218cc6388ddd240a1576d49855262dd3 Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.602199 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-471a1b79aa5e86cd02af440c01e9b655deb58e15e2e1060464489ab57bf2544b WatchSource:0}: Error finding container 471a1b79aa5e86cd02af440c01e9b655deb58e15e2e1060464489ab57bf2544b: Status 404 returned error can't find the container with id 471a1b79aa5e86cd02af440c01e9b655deb58e15e2e1060464489ab57bf2544b Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.604585 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.618331 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-414bd14d21574e2a3f67510a36f486ab5231d629537b2963859b59c21d71979c WatchSource:0}: Error finding container 414bd14d21574e2a3f67510a36f486ab5231d629537b2963859b59c21d71979c: Status 404 returned error can't find the container with id 414bd14d21574e2a3f67510a36f486ab5231d629537b2963859b59c21d71979c Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.626930 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.633820 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.636328 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-3a75bce634ba4cfbdfa5853ba8dfa4b97517841d1099aec8fb482d7fa85d2f3b WatchSource:0}: Error finding container 3a75bce634ba4cfbdfa5853ba8dfa4b97517841d1099aec8fb482d7fa85d2f3b: Status 404 returned error can't find the container with id 3a75bce634ba4cfbdfa5853ba8dfa4b97517841d1099aec8fb482d7fa85d2f3b Nov 22 09:03:24 crc kubenswrapper[4693]: W1122 09:03:24.641427 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-044327d35d9f1fd278c7e9dc54ff297f35f9a492a6f66187d8f9108f8535c2e6 WatchSource:0}: Error finding container 044327d35d9f1fd278c7e9dc54ff297f35f9a492a6f66187d8f9108f8535c2e6: Status 404 returned error can't find the container with id 044327d35d9f1fd278c7e9dc54ff297f35f9a492a6f66187d8f9108f8535c2e6 Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.715133 4693 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.249:6443: connect: connection refused" interval="800ms" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.910516 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.911932 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.911965 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.911981 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:24 crc kubenswrapper[4693]: I1122 09:03:24.912008 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 09:03:24 crc kubenswrapper[4693]: E1122 09:03:24.912388 4693 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.249:6443: connect: connection refused" node="crc" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.106133 4693 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.108284 4693 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 10:34:25.141013803 +0000 UTC Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.108343 4693 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1081h31m0.032672881s for next certificate rotation Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.149637 4693 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="b704bd1eb0c9f3e5d1776ccbb017e84dac77fe401c86c21f1943eded19ad5528" 
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.149695 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"b704bd1eb0c9f3e5d1776ccbb017e84dac77fe401c86c21f1943eded19ad5528"}
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.149775 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"044327d35d9f1fd278c7e9dc54ff297f35f9a492a6f66187d8f9108f8535c2e6"}
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.149944 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150860 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150892 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150906 4693 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23" exitCode=0
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150947 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23"}
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.150962 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"3a75bce634ba4cfbdfa5853ba8dfa4b97517841d1099aec8fb482d7fa85d2f3b"}
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.151034 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.153908 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.153985 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.154002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.156254 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2"}
Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.156304 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"414bd14d21574e2a3f67510a36f486ab5231d629537b2963859b59c21d71979c"}
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"414bd14d21574e2a3f67510a36f486ab5231d629537b2963859b59c21d71979c"} Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.157797 4693 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a" exitCode=0 Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.157820 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a"} Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.157871 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"443cb8d6a2866cf3d6f6269585797efb218cc6388ddd240a1576d49855262dd3"} Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.157943 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.158681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.158701 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.158709 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.160354 4693 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c0f3c9c38c0b7820d44c16ef58978627f25a945911eab5698bb04752b5caf2de" exitCode=0 Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.160395 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c0f3c9c38c0b7820d44c16ef58978627f25a945911eab5698bb04752b5caf2de"} Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.160480 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"471a1b79aa5e86cd02af440c01e9b655deb58e15e2e1060464489ab57bf2544b"} Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.160691 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.160790 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.161605 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.161632 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.161643 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.162223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.162268 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.162284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:25 crc kubenswrapper[4693]: W1122 09:03:25.182526 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.182583 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError" Nov 22 09:03:25 crc kubenswrapper[4693]: W1122 09:03:25.254196 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.254275 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError" Nov 22 09:03:25 crc kubenswrapper[4693]: W1122 09:03:25.366500 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.366574 4693 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError" Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.516276 4693 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 192.168.25.249:6443: connect: connection refused" interval="1.6s" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.712520 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:25 crc kubenswrapper[4693]: W1122 09:03:25.714148 4693 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 192.168.25.249:6443: connect: connection refused Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.714224 4693 reflector.go:158] "Unhandled Error" 
err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 192.168.25.249:6443: connect: connection refused" logger="UnhandledError" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.714899 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.714931 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.714944 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:25 crc kubenswrapper[4693]: I1122 09:03:25.714969 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 09:03:25 crc kubenswrapper[4693]: E1122 09:03:25.715322 4693 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 192.168.25.249:6443: connect: connection refused" node="crc" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.164350 4693 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="19bebc1eba2ce7a455bc926e0a938b25d3b9abe7539f24b8092299ea3806c5a3" exitCode=0 Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.164420 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"19bebc1eba2ce7a455bc926e0a938b25d3b9abe7539f24b8092299ea3806c5a3"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.164536 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.165233 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.165262 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.165271 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.166194 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"6622d62fffcf75abc0135291ed177bb6626a5ab62516d486cb8a57c2c33f71b9"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.166265 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.166872 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.166900 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.166910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169019 4693 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169043 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169056 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169142 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169743 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169768 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.169798 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171237 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171268 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171281 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171272 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171810 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171837 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.171880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173750 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173774 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173786 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173796 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173804 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511"} Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.173889 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.174403 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.174424 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.174432 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.767431 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.863262 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:26 crc kubenswrapper[4693]: I1122 09:03:26.869604 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178512 4693 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="b0b3768892b6beac3facadc9b3966b982b96e3c38bd3aaf65b7983e4f98d028a" exitCode=0 Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178621 4693 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178651 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178664 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"b0b3768892b6beac3facadc9b3966b982b96e3c38bd3aaf65b7983e4f98d028a"} Nov 22 09:03:27 
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178680 4693 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178652 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178748 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.178720 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179763 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179775 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179787 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179839 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179865 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179867 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179879 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179886 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.179939 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.316111 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.317382 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.317417 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.317427 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:27 crc kubenswrapper[4693]: I1122 09:03:27.317452 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184088 4693 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184151 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184072 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c80d5ca058dae1242bb66e024629b5ff0a41f0c9dbfe4284f157bb16223b1fda"}
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184248 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"667ffedd68155ade806f3e47e5df58841fa770343b6034e67f9b70a51ed625be"}
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184270 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b65ab201f5efa079d2ec4b2b68f04fa2993fee9fe4c66ea0e5d35a2ba39a03a1"}
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184280 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2e5c1f2db68b5d0bdace748b3775eea600c8cc7697ddb36e054f58f9b050cad1"}
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184292 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"312dae05572c93ee72294fe06ae641b3498eb04123eb8aec5102a722d1648e1b"}
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184496 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184906 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184940 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.184950 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.185618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.185665 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.185676 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:28 crc kubenswrapper[4693]: I1122 09:03:28.419115 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc"
Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.187216 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.188035 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.188084 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.188096 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.205177 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.205261 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.205881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.205902 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.205910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.301818 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.658491 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.660143 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.664871 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.664897 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:29 crc kubenswrapper[4693]: I1122 09:03:29.664908 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.192038 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.192982 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.193024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.193037 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.199538 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.199821 4693 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.199926 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.201088 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 
Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.201118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:30 crc kubenswrapper[4693]: I1122 09:03:30.201127 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:31 crc kubenswrapper[4693]: I1122 09:03:31.820094 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 09:03:31 crc kubenswrapper[4693]: I1122 09:03:31.820200 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:31 crc kubenswrapper[4693]: I1122 09:03:31.821330 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:31 crc kubenswrapper[4693]: I1122 09:03:31.821374 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:31 crc kubenswrapper[4693]: I1122 09:03:31.821384 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:32 crc kubenswrapper[4693]: I1122 09:03:32.823163 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc"
Nov 22 09:03:32 crc kubenswrapper[4693]: I1122 09:03:32.823352 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:32 crc kubenswrapper[4693]: I1122 09:03:32.824489 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:32 crc kubenswrapper[4693]: I1122 09:03:32.824571 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:32 crc kubenswrapper[4693]: I1122 09:03:32.824586 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:34 crc kubenswrapper[4693]: E1122 09:03:34.210586 4693 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.698393 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.698522 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.699909 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.699954 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.699966 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:03:34 crc kubenswrapper[4693]: I1122 09:03:34.701978 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc"
Nov 22 09:03:35 crc kubenswrapper[4693]: I1122 09:03:35.201377 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach"
Nov 22 09:03:35 crc kubenswrapper[4693]: I1122 09:03:35.202126 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:35 crc kubenswrapper[4693]: I1122 09:03:35.202156 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:35 crc kubenswrapper[4693]: I1122 09:03:35.202164 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:36 crc kubenswrapper[4693]: I1122 09:03:36.107778 4693 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Nov 22 09:03:36 crc kubenswrapper[4693]: I1122 09:03:36.476901 4693 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 09:03:36 crc kubenswrapper[4693]: I1122 09:03:36.476956 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 22 09:03:36 crc kubenswrapper[4693]: I1122 09:03:36.480908 4693 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 22 09:03:36 crc kubenswrapper[4693]: I1122 09:03:36.480955 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 22 09:03:37 crc kubenswrapper[4693]: I1122 09:03:37.698893 4693 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 22 09:03:37 crc kubenswrapper[4693]: I1122 09:03:37.698955 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.319910 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.320019 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.320780 4693 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.320824 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.320834 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:39 crc kubenswrapper[4693]: I1122 09:03:39.328408 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.201825 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.202153 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.202969 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.203002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.203013 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.205015 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.211238 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.211302 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.211827 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.211871 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.211881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.212075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.212102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:40 crc kubenswrapper[4693]: I1122 09:03:40.212110 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:41 crc kubenswrapper[4693]: E1122 09:03:41.463990 4693 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s" Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.465235 4693 trace.go:236] Trace[1366491972]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 09:03:28.328) (total time: 13136ms): 
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[1366491972]: ---"Objects listed" error: 13136ms (09:03:41.465)
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[1366491972]: [13.136581126s] [13.136581126s] END
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.465265 4693 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.465777 4693 trace.go:236] Trace[21117757]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 09:03:28.249) (total time: 13216ms):
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[21117757]: ---"Objects listed" error: 13216ms (09:03:41.465)
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[21117757]: [13.216154277s] [13.216154277s] END
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.465799 4693 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.466056 4693 trace.go:236] Trace[91246800]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 09:03:27.825) (total time: 13640ms):
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[91246800]: ---"Objects listed" error: 13640ms (09:03:41.465)
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[91246800]: [13.640071587s] [13.640071587s] END
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.466081 4693 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.466178 4693 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Nov 22 09:03:41 crc kubenswrapper[4693]: E1122 09:03:41.466512 4693 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.466778 4693 trace.go:236] Trace[680953700]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (22-Nov-2025 09:03:28.390) (total time: 13076ms):
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[680953700]: ---"Objects listed" error: 13076ms (09:03:41.466)
Nov 22 09:03:41 crc kubenswrapper[4693]: Trace[680953700]: [13.076527756s] [13.076527756s] END
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.466802 4693 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.506619 4693 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body=
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.506649 4693 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": EOF" start-of-body=
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.506662 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF"
Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.506684 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF"
prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": EOF" Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.508011 4693 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42392->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 22 09:03:41 crc kubenswrapper[4693]: I1122 09:03:41.508053 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:42392->192.168.126.11:17697: read: connection reset by peer" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.110896 4693 apiserver.go:52] "Watching apiserver" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.113565 4693 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.113797 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-5rjtn","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-qsptv","openshift-multus/multus-2s9rh","openshift-machine-config-operator/machine-config-daemon-scx6r","openshift-network-diagnostics/network-check-source-55646444c4-trplf"] Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114140 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114182 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.114191 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114378 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.114428 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114690 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114748 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.114910 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.114962 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.115036 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.115095 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.115376 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.115387 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.116240 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.116409 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117298 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117568 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117579 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117608 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117684 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117812 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.117933 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118055 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118226 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118237 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118254 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118228 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118287 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118452 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118498 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118592 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.118733 4693 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"proxy-tls" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.119489 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.119525 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.119637 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.120236 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.122241 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.130589 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.137443 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.145886 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.153681 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.160035 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.164833 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.171532 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.177583 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.184636 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.190377 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.199444 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.210623 4693 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.216887 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.218279 4693 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1" exitCode=255
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.218313 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1"}
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.228139 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.235025 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.241484 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.249071 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.255542 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.260175 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.266303 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270513 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270541 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270560 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: 
\"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270576 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270590 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270604 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270826 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270861 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270868 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270907 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270925 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270943 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270958 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270958 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.270975 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271035 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271058 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271060 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271082 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271101 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271119 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271137 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271150 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271154 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271183 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271201 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271189 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271218 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271233 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271249 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271241 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271253 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271292 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271309 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271325 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271340 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271356 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271370 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271264 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271384 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271318 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271399 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271334 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271413 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271428 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271443 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271460 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271473 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271489 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271504 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271518 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271532 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271547 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271562 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271579 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271593 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271630 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271646 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 
09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271669 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271684 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271699 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271713 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271728 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271743 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271758 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271772 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271786 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271824 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod 
\"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271858 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271873 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271887 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271901 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271917 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271930 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271945 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271959 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271972 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271986 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod 
\"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271370 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271415 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271442 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272012 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271541 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272074 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271561 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271582 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271598 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271612 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271719 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272113 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272126 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271727 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271738 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271784 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271905 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271988 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.271991 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272000 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272181 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272199 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272220 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272238 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272256 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272291 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272305 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272319 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272336 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272351 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272367 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod 
\"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272383 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272396 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272410 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272423 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272437 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272465 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272480 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272545 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272563 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272578 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") 
pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272594 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272610 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272625 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272639 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272653 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272669 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272684 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272698 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272712 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272730 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: 
\"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272745 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272759 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272773 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272788 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272803 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272830 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272857 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272872 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272887 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272907 4693 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272922 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272936 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272951 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272967 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272993 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273010 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273026 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273039 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273065 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273080 4693 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273097 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273113 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273126 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273142 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273178 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273193 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273207 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273223 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273238 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273253 4693 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273269 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273285 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273301 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273317 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273331 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273346 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273369 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273384 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273399 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 09:03:42 crc kubenswrapper[4693]: 
I1122 09:03:42.273413 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273427 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273441 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273457 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273808 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273873 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273893 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273909 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273923 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273939 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 
09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273956 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273970 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273987 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274025 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274040 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274055 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274070 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274086 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274102 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274117 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 
22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274131 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274149 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274216 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274234 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274249 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272219 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274475 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272225 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274492 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274508 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274525 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274543 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274561 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274575 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274624 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274644 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274660 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274675 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274690 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274705 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274720 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274735 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274749 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274763 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274778 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274792 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274806 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274831 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" 
(UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274863 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274879 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274895 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274910 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274925 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274940 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274956 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274972 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274987 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275003 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod 
\"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275024 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275068 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-os-release\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275090 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275107 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/7007d901-fc52-4723-a949-db71619b3305-rootfs\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275123 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-kubelet\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275153 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx6z2\" (UniqueName: \"kubernetes.io/projected/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-kube-api-access-cx6z2\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275170 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275186 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cni-binary-copy\") pod 
\"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275199 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-netns\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275213 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzbdm\" (UniqueName: \"kubernetes.io/projected/7007d901-fc52-4723-a949-db71619b3305-kube-api-access-pzbdm\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275243 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275259 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-etc-kubernetes\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275274 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qlj26\" (UniqueName: \"kubernetes.io/projected/5f914d43-b154-48b4-9a79-971e20908551-kube-api-access-qlj26\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275291 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275306 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-hostroot\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275322 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275337 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-system-cni-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275355 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-k8s-cni-cncf-io\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275387 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.275404 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5f914d43-b154-48b4-9a79-971e20908551-hosts-file\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276684 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-daemon-config\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276720 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-multus-certs\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276781 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276802 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 
09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276850 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-os-release\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276873 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276895 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cnibin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276915 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-conf-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276932 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7007d901-fc52-4723-a949-db71619b3305-proxy-tls\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.276955 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280179 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-multus\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280210 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7007d901-fc52-4723-a949-db71619b3305-mcd-auth-proxy-config\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280234 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: 
\"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-binary-copy\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280256 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-socket-dir-parent\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280280 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280302 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-system-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280332 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-bin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280363 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280386 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cnibin\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280406 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280427 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 
09:03:42.280447 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzt7h\" (UniqueName: \"kubernetes.io/projected/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-kube-api-access-lzt7h\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280470 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280544 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280555 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280569 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280580 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280590 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280604 4693 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280615 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280624 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280634 4693 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280647 4693 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: 
\"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280657 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280666 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280675 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280688 4693 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280698 4693 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280709 4693 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280719 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280730 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280739 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280748 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280760 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280769 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280782 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280791 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280803 4693 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280811 4693 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280832 4693 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280856 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280869 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280878 4693 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280887 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280896 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287911 4693 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288569 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272284 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). 
InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272528 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272745 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272869 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272927 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272947 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.272987 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273059 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273079 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288859 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273143 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273253 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273287 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273308 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273340 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273463 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273477 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273523 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273574 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273728 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273771 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273871 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273876 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273939 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.273960 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274059 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274105 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274131 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274312 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274342 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.289102 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274503 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274610 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274630 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274643 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.274705 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.289147 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277184 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277191 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277269 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277282 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277420 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277428 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277467 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277481 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277496 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277509 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277791 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.277868 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.278179 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.278566 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.278978 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.278980 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279167 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279244 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279251 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279426 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279489 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279513 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279578 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279591 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279327 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279970 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280013 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.279980 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280056 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280301 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280347 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280466 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280670 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.280875 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.281455 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.281723 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.281736 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.281787 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282020 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282082 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282288 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282642 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282760 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.282792 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.283126 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.283306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.283533 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.283554 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.283828 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.285068 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.285356 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.285304 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.285737 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.285929 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286062 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286081 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286503 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286521 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286627 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286209 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286754 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.286367 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:03:42.786349973 +0000 UTC m=+18.928852264 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286787 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.286914 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287163 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287587 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287615 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287828 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.287912 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288012 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.288169 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288191 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288249 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288453 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.288638 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.289419 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.289552 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.290457 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.290517 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.289618 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.290155 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.290371 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.290734 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:42.790716397 +0000 UTC m=+18.933218689 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.291135 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.295901 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.296570 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.297194 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.298433 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.298595 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.298655 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:42.798639492 +0000 UTC m=+18.941141783 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.299089 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.299787 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.299863 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.300019 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.300424 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.300698 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.301028 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.301150 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.300528 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.301762 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302075 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302107 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302138 4693 scope.go:117] "RemoveContainer" containerID="cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.302155 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.302168 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.302179 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.302215 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:42.802204018 +0000 UTC m=+18.944706309 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302301 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302565 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.302909 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.303207 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). 
InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.303362 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.303776 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.304064 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.304192 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-852ps"] Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.304649 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.305302 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.305322 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.305568 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.305926 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.306049 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.306339 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.306735 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307030 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307041 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307127 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307150 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307345 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307440 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307390 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.307533 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.308225 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.308274 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.308309 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.308366 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.308480 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309000 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309219 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309321 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309367 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309529 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.309437 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.309569 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.309581 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.309622 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:42.809612145 +0000 UTC m=+18.952114436 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309657 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309648 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309472 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309509 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.309954 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.310014 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.310498 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.311629 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.311997 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.317723 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.318242 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.323454 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.323733 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.327286 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\
":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.332603 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.334131 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.338772 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.345228 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.350947 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.356913 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.362655 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.373599 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath
\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin
\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.381889 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382258 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cnibin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382288 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-conf-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382306 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382323 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/7007d901-fc52-4723-a949-db71619b3305-proxy-tls\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382337 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7007d901-fc52-4723-a949-db71619b3305-mcd-auth-proxy-config\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382351 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-binary-copy\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382367 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-multus\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382381 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382395 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-system-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382410 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-socket-dir-parent\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382430 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382443 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382455 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382476 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382493 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382507 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-bin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382521 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cnibin\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382533 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382548 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382566 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzt7h\" (UniqueName: \"kubernetes.io/projected/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-kube-api-access-lzt7h\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382579 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382599 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2ndl\" (UniqueName: \"kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382613 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-os-release\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382630 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382649 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/7007d901-fc52-4723-a949-db71619b3305-rootfs\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382662 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382679 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382784 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cnibin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382693 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382833 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-conf-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382940 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: 
\"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-os-release\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.382977 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-bin\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383219 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx6z2\" (UniqueName: \"kubernetes.io/projected/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-kube-api-access-cx6z2\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383242 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383251 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cnibin\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383283 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383302 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-kubelet\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383317 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383320 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/7007d901-fc52-4723-a949-db71619b3305-rootfs\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383330 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cni-binary-copy\") pod \"multus-2s9rh\" (UID: 
\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383359 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-netns\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383524 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-socket-dir-parent\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383533 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383600 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383610 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-kubelet\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383631 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-var-lib-cni-multus\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383644 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383673 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzbdm\" (UniqueName: \"kubernetes.io/projected/7007d901-fc52-4723-a949-db71619b3305-kube-api-access-pzbdm\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383694 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-hostroot\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc 
kubenswrapper[4693]: I1122 09:03:42.383730 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-etc-kubernetes\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383779 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qlj26\" (UniqueName: \"kubernetes.io/projected/5f914d43-b154-48b4-9a79-971e20908551-kube-api-access-qlj26\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383793 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-hostroot\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383829 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-system-cni-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383860 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-netns\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383879 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-etc-kubernetes\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383877 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383911 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-system-cni-dir\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383926 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.383984 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-cni-binary-copy\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384057 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384132 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-system-cni-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384155 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/7007d901-fc52-4723-a949-db71619b3305-mcd-auth-proxy-config\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384164 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-k8s-cni-cncf-io\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384190 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-k8s-cni-cncf-io\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384201 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-cni-binary-copy\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384212 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-daemon-config\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384253 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5f914d43-b154-48b4-9a79-971e20908551-hosts-file\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384312 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384341 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384356 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-os-release\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384360 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5f914d43-b154-48b4-9a79-971e20908551-hosts-file\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384387 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384411 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384434 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384518 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-os-release\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384521 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-multus-certs\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384650 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: 
\"kubernetes.io/host-path/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-host-run-multus-certs\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384763 4693 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384780 4693 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384792 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384802 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384810 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384829 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384852 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384862 4693 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384870 4693 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384879 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384889 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384898 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384906 
4693 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384915 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384923 4693 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384928 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-tuning-conf-dir\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384931 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384963 4693 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384974 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384983 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.384993 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385001 4693 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385010 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385018 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385028 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node 
\"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385036 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385043 4693 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385051 4693 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385059 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385067 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385075 4693 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385083 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385092 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385099 4693 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385107 4693 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385115 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385123 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385131 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 
09:03:42.385138 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385147 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385158 4693 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385167 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385175 4693 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385184 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385193 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385200 4693 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385209 4693 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385216 4693 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385225 4693 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385232 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385239 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385249 4693 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385258 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385266 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385274 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385284 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385292 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385301 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385308 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385317 4693 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385325 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385334 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385342 4693 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385350 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: 
\"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385358 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385365 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385323 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-multus-daemon-config\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385373 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385459 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385469 4693 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385478 4693 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385486 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385493 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385502 4693 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385510 4693 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385517 4693 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385525 4693 reconciler_common.go:293] "Volume 
detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385534 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385541 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385549 4693 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385556 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385563 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385571 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385579 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385588 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385595 4693 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385601 4693 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385609 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385616 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385624 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: 
\"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385631 4693 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385639 4693 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385647 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385654 4693 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385662 4693 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385671 4693 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385678 4693 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385703 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385711 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385719 4693 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385740 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385748 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385755 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385762 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385770 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385777 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385786 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385793 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385814 4693 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385832 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385851 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385860 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385868 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385877 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385884 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385892 4693 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385899 4693 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385908 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385915 4693 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385923 4693 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385930 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385937 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385867 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/7007d901-fc52-4723-a949-db71619b3305-proxy-tls\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385944 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385976 4693 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385986 4693 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.385994 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386003 4693 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc 
kubenswrapper[4693]: I1122 09:03:42.386011 4693 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386020 4693 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386028 4693 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386035 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386043 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386050 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386057 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386065 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386072 4693 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386080 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386087 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386094 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386102 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 
crc kubenswrapper[4693]: I1122 09:03:42.386110 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386117 4693 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386127 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386136 4693 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386144 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386150 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386158 4693 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386165 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386172 4693 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386179 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386193 4693 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386201 4693 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386208 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc 
kubenswrapper[4693]: I1122 09:03:42.386216 4693 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386224 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386231 4693 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386240 4693 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386247 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386255 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386262 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386269 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386276 4693 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386284 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.386291 4693 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.389289 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.400278 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzbdm\" (UniqueName: \"kubernetes.io/projected/7007d901-fc52-4723-a949-db71619b3305-kube-api-access-pzbdm\") pod \"machine-config-daemon-scx6r\" (UID: \"7007d901-fc52-4723-a949-db71619b3305\") " pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.400353 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx6z2\" (UniqueName: \"kubernetes.io/projected/9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7-kube-api-access-cx6z2\") pod \"multus-2s9rh\" (UID: \"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\") " pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.400530 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qlj26\" (UniqueName: \"kubernetes.io/projected/5f914d43-b154-48b4-9a79-971e20908551-kube-api-access-qlj26\") pod \"node-resolver-qsptv\" (UID: \"5f914d43-b154-48b4-9a79-971e20908551\") " pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.401200 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-lzt7h\" (UniqueName: \"kubernetes.io/projected/bd8b5bd3-66f1-4495-babd-04ae3e5cea6f-kube-api-access-lzt7h\") pod \"multus-additional-cni-plugins-5rjtn\" (UID: \"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\") " pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.402906 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.410395 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"na
me\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.425605 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.432598 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 22 09:03:42 crc kubenswrapper[4693]: W1122 09:03:42.433194 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-6b203011294cebf4ca10509e1a220d75190449f53a2b64b7238f0ebb4bccc7d7 WatchSource:0}: Error finding container 6b203011294cebf4ca10509e1a220d75190449f53a2b64b7238f0ebb4bccc7d7: Status 404 returned error can't find the container with id 6b203011294cebf4ca10509e1a220d75190449f53a2b64b7238f0ebb4bccc7d7 Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.435890 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.441996 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.449179 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.453932 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-qsptv" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.457970 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-2s9rh" Nov 22 09:03:42 crc kubenswrapper[4693]: W1122 09:03:42.473632 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7007d901_fc52_4723_a949_db71619b3305.slice/crio-1047ea832d79d70f5d9960ec213eb0b1248ee37c457fbc94de37f2d6ff4daa10 WatchSource:0}: Error finding container 1047ea832d79d70f5d9960ec213eb0b1248ee37c457fbc94de37f2d6ff4daa10: Status 404 returned error can't find the container with id 1047ea832d79d70f5d9960ec213eb0b1248ee37c457fbc94de37f2d6ff4daa10 Nov 22 09:03:42 crc kubenswrapper[4693]: W1122 09:03:42.485459 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ffcea9f_546f_4aa6_aa52_f1f2a96d4ac7.slice/crio-a04e13a8a7a6c500edaddbea778b4bbd1e77375628f4b71d5599e87b70861c34 WatchSource:0}: Error finding container a04e13a8a7a6c500edaddbea778b4bbd1e77375628f4b71d5599e87b70861c34: Status 404 returned error can't find the container with id a04e13a8a7a6c500edaddbea778b4bbd1e77375628f4b71d5599e87b70861c34 Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487103 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487126 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487159 4693 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487174 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487187 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487201 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487202 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487246 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487263 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2ndl\" (UniqueName: \"kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487270 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487277 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487319 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: 
\"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487324 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487345 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487346 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487361 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487370 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487414 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487429 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487446 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487479 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch\") pod 
\"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487493 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487520 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487552 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487578 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487665 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487786 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487835 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487862 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487899 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 
09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487879 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.487382 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488044 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488082 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488087 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488102 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488141 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.488239 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.493107 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.504966 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p2ndl\" (UniqueName: \"kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl\") pod \"ovnkube-node-852ps\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.620887 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:42 crc kubenswrapper[4693]: W1122 09:03:42.638663 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2fa68d41_61c5_4781_8984_add9804c1b4b.slice/crio-81af516e48f6a2f81f5a5ace52de774b1cde8c1d50c7263d9fe40ea0beb93812 WatchSource:0}: Error finding container 81af516e48f6a2f81f5a5ace52de774b1cde8c1d50c7263d9fe40ea0beb93812: Status 404 returned error can't find the container with id 81af516e48f6a2f81f5a5ace52de774b1cde8c1d50c7263d9fe40ea0beb93812 Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.790729 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.790799 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.790890 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:03:43.790877039 +0000 UTC m=+19.933379330 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.790927 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.790989 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:43.790975735 +0000 UTC m=+19.933478026 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.891538 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.891602 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:42 crc kubenswrapper[4693]: I1122 09:03:42.891630 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891736 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891766 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891776 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891777 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891781 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891801 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.891866 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object 
"openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.892116 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:43.891813364 +0000 UTC m=+20.034315656 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.892155 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:43.89214561 +0000 UTC m=+20.034647901 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:42 crc kubenswrapper[4693]: E1122 09:03:42.892168 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:43.892162111 +0000 UTC m=+20.034664401 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.221152 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234" exitCode=0 Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.221233 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.221541 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"81af516e48f6a2f81f5a5ace52de774b1cde8c1d50c7263d9fe40ea0beb93812"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.223393 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerStarted","Data":"b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.223449 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerStarted","Data":"a04e13a8a7a6c500edaddbea778b4bbd1e77375628f4b71d5599e87b70861c34"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.225732 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.225777 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.225789 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"85b5a9946d7117aa5a9284d1d226189a971e0e830523686667ffd15ff63de28f"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.227189 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.232239 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.232719 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.232957 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.246224 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-qsptv" event={"ID":"5f914d43-b154-48b4-9a79-971e20908551","Type":"ContainerStarted","Data":"201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.246246 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-qsptv" event={"ID":"5f914d43-b154-48b4-9a79-971e20908551","Type":"ContainerStarted","Data":"b2469660dbcc70b8c46a8a27fc2b558c0976a522d38fe5c237d806aebbe603e9"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.247436 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.247460 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"6b203011294cebf4ca10509e1a220d75190449f53a2b64b7238f0ebb4bccc7d7"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.249079 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.249102 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.249111 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"1047ea832d79d70f5d9960ec213eb0b1248ee37c457fbc94de37f2d6ff4daa10"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.257101 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08" exitCode=0 Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.257153 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.257169 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerStarted","Data":"da6ced2fa472be678340ebbed6970be68a6886c1f7af052c05e58cae9fa0f122"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.258023 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.260827 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"08702c1ef208f8417a07e27d49428aceadfab89b1d3727697685178c3d5c741c"} Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.285406 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.304125 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging 
kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/ru
n/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mount
Path\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.314309 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.325615 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.335444 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.351080 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.362242 4693 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.375383 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.384449 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.393396 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-che
ck-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.401453 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.410328 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.419601 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.432604 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.442500 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.451232 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.464796 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z 
is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.472038 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.480391 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.490988 4693 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{
\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acces
s-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.501064 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.509378 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:43Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.799693 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.799789 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:03:45.79977002 +0000 UTC m=+21.942272310 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.800260 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.800392 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.800435 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:45.80042431 +0000 UTC m=+21.942926602 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.901314 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.901373 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:43 crc kubenswrapper[4693]: I1122 09:03:43.901399 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901472 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901504 4693 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901530 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901544 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901517 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:45.901505248 +0000 UTC m=+22.044007538 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901597 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:45.901582703 +0000 UTC m=+22.044084994 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901638 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901646 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901653 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:43 crc kubenswrapper[4693]: E1122 09:03:43.901672 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:45.901667052 +0000 UTC m=+22.044169343 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.146543 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.146563 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.146653 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.146699 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.146777 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.146874 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.150348 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.151165 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.152218 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.152868 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.153835 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.154340 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.154888 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.155759 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.156424 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.157253 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.157747 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.158660 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.159087 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.159552 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.160241 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.161130 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.161599 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.162493 4693 kubelet_volumes.go:163] "Cleaned up orphaned 
pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.162904 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.163388 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.164491 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.165032 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.165921 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.166326 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.167242 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.167747 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.167788 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The 
container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.168352 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.169329 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.169757 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.170722 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.171175 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.171944 4693 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.172040 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.173507 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.174418 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.174834 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.176289 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.177014 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.177528 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.178837 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.179444 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.180406 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.180966 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.181884 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.182439 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.179426 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.183218 4693 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.183700 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.184507 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.185177 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.189118 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.189580 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.190442 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.190951 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.191444 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.192316 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.205249 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.229033 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc 
kubenswrapper[4693]: I1122 09:03:44.241980 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.251111 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.259884 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.264086 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b" exitCode=0 Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.264135 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" 
event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268492 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268594 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268666 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268724 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268780 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.268856 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.271971 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.281617 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.290318 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.299760 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.309408 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.321642 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.329391 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.338249 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.348661 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.357044 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.365579 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.378499 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.386954 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: 
failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.395369 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.405010 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.415413 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.666988 4693 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.668444 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.668476 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.668484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.668534 4693 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.673199 4693 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.673368 4693 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.674075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.674101 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.674111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.674124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.674132 4693 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.682516 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.684673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.684699 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.684707 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.684717 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.684725 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.692546 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.694671 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.694692 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
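Every one of these node-status patch attempts dies at the same point: the API server forwards the PATCH to the node.network-node-identity.openshift.io admission webhook at https://127.0.0.1:9743, and the TLS handshake fails because the webhook's serving certificate expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-22T09:03:44Z. The following is a minimal Go sketch of the validity-window check that yields this exact x509 error class; it is not kubelet or webhook code, and the certificate path is hypothetical:

package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	// Hypothetical path; point this at the webhook's serving certificate.
	pemBytes, err := os.ReadFile("/tmp/webhook-serving.crt")
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// crypto/x509 rejects any chain whose certificate lies outside
	// [NotBefore, NotAfter]; this reproduces the message seen in the log.
	now := time.Now()
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("x509: certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate is within its validity window")
}

Because the failure happens during certificate verification, no retry of the PATCH can succeed until the webhook's serving certificate is rotated (or the webhook is bypassed), which is why the same error repeats verbatim below.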
event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.694701 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.694710 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.694718 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.701183 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.702606 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.704639 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.705171 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.705205 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.705214 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.705227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.705236 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.706886 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.711880 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 
2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.714060 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.716281 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.716306 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
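The NodeNotReady / NetworkReady=false condition repeated in these records has a separate cause from the webhook failure: the container runtime finds no CNI network configuration in /etc/kubernetes/cni/net.d/ because the network plugin has not started yet. A rough Go sketch of the directory scan a runtime performs; this only approximates libcni's config discovery (filtering on the conventional *.conf, *.conflist, and *.json extensions) and is not the actual implementation:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniConfFiles is an illustrative helper: it returns the CNI config
// candidates in dir, the way a runtime decides whether the network
// plugin is ready.
func cniConfFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var files []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			files = append(files, filepath.Join(dir, e.Name()))
		}
	}
	return files, nil
}

func main() {
	files, err := cniConfFiles("/etc/kubernetes/cni/net.d")
	if err != nil || len(files) == 0 {
		// Mirrors the condition message in the log above.
		fmt.Println("no CNI configuration file found; network plugin not ready")
		return
	}
	fmt.Println("CNI configs:", files)
}

On this node the scan comes up empty, so the kubelet keeps republishing Ready=False with reason KubeletNotReady on every status attempt.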
event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.716316 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.716332 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.716340 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.722522 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.724622 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{... identical node status payload elided ...}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: E1122 09:03:44.724724 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.725872 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.725898 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.725907 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.725917 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.725924 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.730569 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"nam
e\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.739298 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.747653 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift
-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 
09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.755364 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.761691 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.769601 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.777105 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.784500 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.791549 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.805509 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.827597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.827628 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.827635 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.827647 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.827655 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.834649 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.874037 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.914916 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.929672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.929957 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.929967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.929982 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.929991 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:44Z","lastTransitionTime":"2025-11-22T09:03:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.956079 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:44 crc kubenswrapper[4693]: I1122 09:03:44.994915 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.031931 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.031968 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.031978 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.031992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.032003 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.035027 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.074307 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.119601 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.133366 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.133416 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.133428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.133443 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.133453 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.155595 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.195193 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.234942 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.234976 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.234984 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.234996 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.235005 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.236154 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-
22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.271791 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6" exitCode=0 Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.271875 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.272798 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.275394 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc
35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.292201 4693 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.337614 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.337645 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.337654 4693 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.337666 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.337675 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.338125 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.378738 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-op
envswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{
},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.414549 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.439164 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.439194 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.439203 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.439215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.439225 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.455009 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.496499 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.535748 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.541539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.541569 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.541577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.541590 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.541600 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.576434 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.614806 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.643856 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.643884 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.643893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.643904 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.643912 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.653462 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.695680 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.745968 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.745999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.746009 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.746022 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.746030 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.750982 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.776345 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.816114 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.817324 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.817425 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.817529 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.817566 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:03:49.817542082 +0000 UTC m=+25.960044373 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.817592 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:49.817585744 +0000 UTC m=+25.960088036 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.847727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.847756 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.847765 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.847778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.847787 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.854535 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:45Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.918885 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.918949 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.918978 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919042 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919082 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:49.919070351 +0000 UTC m=+26.061572642 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919096 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919148 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919183 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919209 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919267 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:49.919248427 +0000 UTC m=+26.061750718 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919154 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919309 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:45 crc kubenswrapper[4693]: E1122 09:03:45.919340 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:49.919330661 +0000 UTC m=+26.061832952 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.950075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.950105 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.950114 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.950126 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:45 crc kubenswrapper[4693]: I1122 09:03:45.950135 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:45Z","lastTransitionTime":"2025-11-22T09:03:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.051901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.051931 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.051941 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.051953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.051962 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.146538 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.146564 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:46 crc kubenswrapper[4693]: E1122 09:03:46.146642 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.146929 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:46 crc kubenswrapper[4693]: E1122 09:03:46.146989 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:46 crc kubenswrapper[4693]: E1122 09:03:46.147038 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.153683 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.153712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.153721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.153734 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.153742 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.255961 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.255999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.256009 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.256021 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.256029 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.278559 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.280922 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe" exitCode=0 Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.280991 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.291515 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.314331 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.324313 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay
.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.333148 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.340995 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.348985 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358393 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358415 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358423 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358436 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358445 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.358442 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.367248 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.375276 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.381683 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.389708 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.397111 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.404332 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:46Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.460110 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.460143 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.460151 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.460166 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.460173 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.562259 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.562299 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.562308 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.562321 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.562331 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.664124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.664163 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.664173 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.664188 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.664199 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.765947 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.765978 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.765987 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.765998 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.766007 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.867946 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.867984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.867992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.868006 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.868014 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.969691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.969721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.969729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.969742 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:46 crc kubenswrapper[4693]: I1122 09:03:46.969752 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:46Z","lastTransitionTime":"2025-11-22T09:03:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.071562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.071597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.071605 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.071626 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.071637 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.173386 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.173415 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.173424 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.173435 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.173443 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.275496 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.275530 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.275538 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.275550 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.275559 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.285078 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93" exitCode=0 Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.285115 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.293679 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.303691 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.315606 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:
687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mo
untPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.324222 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.333089 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.340683 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.347757 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.356767 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.366393 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.374077 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.377082 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.377117 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.377126 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.377145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.377155 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.381975 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.389532 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.405361 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",
\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17
b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:47Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.478409 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.478441 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.478451 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.478464 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.478480 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.579922 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.579955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.579964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.579975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.579984 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.681315 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.681339 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.681347 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.681357 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.681365 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.782921 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.782954 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.782963 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.782975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.782984 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.884583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.884696 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.884764 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.884839 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.884914 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.986568 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.986742 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.986839 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.986915 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:47 crc kubenswrapper[4693]: I1122 09:03:47.986967 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:47Z","lastTransitionTime":"2025-11-22T09:03:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.024446 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-j6drd"] Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.024758 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.026101 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.026819 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.027509 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.027699 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.035168 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.043424 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.049610 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.057410 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.064941 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.072315 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.078466 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.085553 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.088669 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.088698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.088707 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.088718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.088727 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.097808 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb94931521
0642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.107769 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.115491 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.122484 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.130759 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.136911 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b387eae-f6b6-42ec-9736-176d13068eea-host\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.136943 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ng2gg\" (UniqueName: \"kubernetes.io/projected/2b387eae-f6b6-42ec-9736-176d13068eea-kube-api-access-ng2gg\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.136973 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2b387eae-f6b6-42ec-9736-176d13068eea-serviceca\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.141404 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.146969 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:48 crc kubenswrapper[4693]: E1122 09:03:48.147127 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.147447 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:48 crc kubenswrapper[4693]: E1122 09:03:48.147564 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.147737 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:48 crc kubenswrapper[4693]: E1122 09:03:48.147859 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.190441 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.190471 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.190481 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.190493 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.190501 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.238292 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b387eae-f6b6-42ec-9736-176d13068eea-host\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.238471 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ng2gg\" (UniqueName: \"kubernetes.io/projected/2b387eae-f6b6-42ec-9736-176d13068eea-kube-api-access-ng2gg\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.238502 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2b387eae-f6b6-42ec-9736-176d13068eea-serviceca\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.238474 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/2b387eae-f6b6-42ec-9736-176d13068eea-host\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.239331 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/2b387eae-f6b6-42ec-9736-176d13068eea-serviceca\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 
22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.251122 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ng2gg\" (UniqueName: \"kubernetes.io/projected/2b387eae-f6b6-42ec-9736-176d13068eea-kube-api-access-ng2gg\") pod \"node-ca-j6drd\" (UID: \"2b387eae-f6b6-42ec-9736-176d13068eea\") " pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.292251 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.292274 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.292283 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.292295 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.292303 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.294075 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.294265 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.294295 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.298985 4693 generic.go:334] "Generic (PLEG): container finished" podID="bd8b5bd3-66f1-4495-babd-04ae3e5cea6f" containerID="867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c" exitCode=0 Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.299013 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerDied","Data":"867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.307310 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\
\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\
"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.311397 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.311704 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.316837 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.326447 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.334242 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-j6drd" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.335838 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\
"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-
11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.344277 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: W1122 09:03:48.346387 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2b387eae_f6b6_42ec_9736_176d13068eea.slice/crio-02af7062ab3787ae64465a5e83276a533c9f2606b8c85454e1fcb0162c1341c8 WatchSource:0}: Error finding container 02af7062ab3787ae64465a5e83276a533c9f2606b8c85454e1fcb0162c1341c8: Status 404 returned error can't find the container with id 02af7062ab3787ae64465a5e83276a533c9f2606b8c85454e1fcb0162c1341c8 Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.352946 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.361260 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.368739 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.377033 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.385715 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.393504 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.395047 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.395141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.395151 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.395164 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.395172 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.401578 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.410227 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.416953 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.426042 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.433479 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.443240 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.453390 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e787
5ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.461926 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.469918 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: 
[node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.477507 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.488783 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.497203 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.497236 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.497244 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.497257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.497267 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.500629 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\
"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.515355 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.554105 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.595466 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.598729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.598760 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.598769 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.598779 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.598788 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.635873 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.674228 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:48Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.700484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.700503 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.700511 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.700524 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.700532 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.802131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.802167 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.802175 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.802188 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.802196 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.903752 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.903781 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.903790 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.903800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:48 crc kubenswrapper[4693]: I1122 09:03:48.903808 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:48Z","lastTransitionTime":"2025-11-22T09:03:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.005541 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.005565 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.005574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.005588 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.005597 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.106883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.106927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.106938 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.106953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.106964 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.208682 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.208719 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.208728 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.208744 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.208756 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.304290 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" event={"ID":"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f","Type":"ContainerStarted","Data":"e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.305273 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-j6drd" event={"ID":"2b387eae-f6b6-42ec-9736-176d13068eea","Type":"ContainerStarted","Data":"b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.305312 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-j6drd" event={"ID":"2b387eae-f6b6-42ec-9736-176d13068eea","Type":"ContainerStarted","Data":"02af7062ab3787ae64465a5e83276a533c9f2606b8c85454e1fcb0162c1341c8"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.305343 4693 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.310284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.310314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.310322 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.310333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.310343 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.314423 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.322110 4693 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368e
fe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.330279 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.338166 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.345600 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.353087 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.361329 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.369152 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.375193 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.382522 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.390260 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.397203 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.404084 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.411536 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.411563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.411572 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.411583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.411591 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.415642 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88
c250abe19ada7f967f74c8c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.423052 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.430582 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.437348 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.444985 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.455758 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.475053 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.513348 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.513442 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.513505 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.513561 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.513613 4693 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.514637 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.553596 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.595127 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.615322 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.615355 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.615363 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.615376 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.615384 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.637643 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.676359 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.713936 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.717197 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.717230 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.717267 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.717280 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.717289 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.755390 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\
\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.801220 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88
c250abe19ada7f967f74c8c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:49Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.819434 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.819467 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.819476 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.819492 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.819501 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.849733 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.849818 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.849902 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:03:57.84988663 +0000 UTC m=+33.992388921 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.849927 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.849971 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:57.849959867 +0000 UTC m=+33.992462159 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.921384 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.921414 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.921423 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.921434 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.921442 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:49Z","lastTransitionTime":"2025-11-22T09:03:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.950815 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.951030 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:49 crc kubenswrapper[4693]: I1122 09:03:49.951059 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.950964 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951140 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:57.951121748 +0000 UTC m=+34.093624038 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951148 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951163 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951173 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951204 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:57.951195275 +0000 UTC m=+34.093697566 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951243 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951252 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951258 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:49 crc kubenswrapper[4693]: E1122 09:03:49.951276 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:57.951270027 +0000 UTC m=+34.093772317 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.023266 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.023297 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.023306 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.023319 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.023329 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.125558 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.125593 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.125602 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.125615 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.125630 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.145867 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.145883 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.145908 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:50 crc kubenswrapper[4693]: E1122 09:03:50.145976 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:50 crc kubenswrapper[4693]: E1122 09:03:50.146051 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:50 crc kubenswrapper[4693]: E1122 09:03:50.146108 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.227464 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.227499 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.227508 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.227522 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.227531 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.309066 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/0.log" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.311324 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8" exitCode=1 Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.311348 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.311924 4693 scope.go:117] "RemoveContainer" containerID="435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.320561 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.327539 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.328632 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.328660 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.328669 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.328681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.328690 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.337531 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":
\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.346423 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 
09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.352733 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.360818 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.368951 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.376302 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.389451 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88
c250abe19ada7f967f74c8c8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069567 5938 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069680 5938 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069817 5938 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 09:03:50.069925 5938 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 09:03:50.070092 5938 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 09:03:50.070111 5938 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 09:03:50.070128 5938 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 09:03:50.070181 5938 factory.go:656] Stopping watch factory\\\\nI1122 09:03:50.070193 5938 ovnkube.go:599] Stopped ovnkube\\\\nI1122 09:03:50.070150 5938 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 09:03:50.070154 5938 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.399125 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.408734 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.418730 
4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce
4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/et
c/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.429982 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.430127 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.430161 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.430170 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.430183 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.430192 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.438597 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:50Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.532598 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.532634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.532642 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.532657 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.532690 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.634251 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.634280 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.634288 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.634301 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.634309 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.735894 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.735930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.735940 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.735953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.735963 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.837376 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.837411 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.837420 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.837432 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.837441 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.939042 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.939077 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.939085 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.939100 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:50 crc kubenswrapper[4693]: I1122 09:03:50.939109 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:50Z","lastTransitionTime":"2025-11-22T09:03:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.040791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.040825 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.040865 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.040879 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.040888 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.142767 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.142801 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.142812 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.142825 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.142863 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.244754 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.244782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.244791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.244803 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.244811 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.315418 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/1.log" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.315925 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/0.log" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.318312 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5" exitCode=1 Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.318339 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.318462 4693 scope.go:117] "RemoveContainer" containerID="435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.318861 4693 scope.go:117] "RemoveContainer" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5" Nov 22 09:03:51 crc kubenswrapper[4693]: E1122 09:03:51.318998 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.328287 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.335817 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.344267 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.346302 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.346326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.346352 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.346365 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.346373 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.353731 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.362447 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io
/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.370271 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.376997 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.384947 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.422194 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e787
5ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.434218 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.441709 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.448603 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.448634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.448643 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.448655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.448664 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.450140 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.458132 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.469977 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://435406f91af5115260086f3ae6d51e827ad9fd88c250abe19ada7f967f74c8c8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069567 5938 reflector.go:311] Stopping reflector *v1.Pod (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069680 5938 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1122 09:03:50.069817 5938 reflector.go:311] Stopping reflector *v1.EgressQoS (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressqos/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 09:03:50.069925 5938 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI1122 09:03:50.070092 5938 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1122 09:03:50.070111 5938 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1122 09:03:50.070128 5938 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1122 09:03:50.070181 5938 factory.go:656] Stopping watch factory\\\\nI1122 09:03:50.070193 5938 ovnkube.go:599] Stopped ovnkube\\\\nI1122 09:03:50.070150 5938 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1122 09:03:50.070154 5938 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1122 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 
},NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a07
6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:51Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.550398 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.550429 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.550438 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.550453 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.550462 4693 
setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.652152 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.652184 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.652196 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.652208 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.652218 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.753951 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.753979 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.753988 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.754001 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.754009 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.855577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.855611 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.855620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.855632 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.855640 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.957708 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.957739 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.957746 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.957776 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:51 crc kubenswrapper[4693]: I1122 09:03:51.957786 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:51Z","lastTransitionTime":"2025-11-22T09:03:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.059612 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.059644 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.059670 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.059682 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.059692 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.146228 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.146233 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:52 crc kubenswrapper[4693]: E1122 09:03:52.146349 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.146356 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:52 crc kubenswrapper[4693]: E1122 09:03:52.146476 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:52 crc kubenswrapper[4693]: E1122 09:03:52.146541 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.161908 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.161999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.162067 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.162124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.162235 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.263588 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.263615 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.263625 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.263638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.263646 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.321053 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/1.log" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.323460 4693 scope.go:117] "RemoveContainer" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5" Nov 22 09:03:52 crc kubenswrapper[4693]: E1122 09:03:52.323574 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.331399 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.339508 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.346050 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.353021 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365044 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365595 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365603 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365616 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.365632 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.375544 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.383261 4693 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368e
fe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.390961 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.397764 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.405037 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.412936 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.421156 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.428558 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.434759 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.467094 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.467122 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.467131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.467141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.467149 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.569257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.569285 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.569293 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.569304 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.569312 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.670533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.670562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.670570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.670580 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.670587 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.772095 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.772129 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.772138 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.772149 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.772158 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.827990 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.836297 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.844135 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.850590 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.857592 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.869467 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\
\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-acce
ss-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\
\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.873628 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.873653 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.873666 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.873678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.873686 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.879220 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.887372 4693 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368e
fe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.895239 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.902438 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.910100 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.918890 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.928137 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.935768 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.941925 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:52Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.975441 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.975467 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.975475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.975486 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:52 crc kubenswrapper[4693]: I1122 09:03:52.975494 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:52Z","lastTransitionTime":"2025-11-22T09:03:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:53 crc kubenswrapper[4693]: I1122 09:03:53.077075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:53 crc kubenswrapper[4693]: I1122 09:03:53.077099 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:53 crc kubenswrapper[4693]: I1122 09:03:53.077107 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:53 crc kubenswrapper[4693]: I1122 09:03:53.077118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:53 crc kubenswrapper[4693]: I1122 09:03:53.077126 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:53Z","lastTransitionTime":"2025-11-22T09:03:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
-- last 5 messages repeated 9 times (09:03:53.178 through 09:03:53.993) --
Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.026378 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn"] Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.026882 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn"
Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.028161 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.028359 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.035551 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.044927 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.052375 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.060182 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.068238 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.075468 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.081616 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.090335 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.095574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.095602 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.095611 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.095623 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.095631 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.099065 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/cr
cont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for 
RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 
2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.106667 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.114082 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.121640 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.128051 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.139217 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e
5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.146134 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.146183 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.146174 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.146234 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.146343 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.146393 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.146524 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.156421 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.165003 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.172591 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.178685 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.185270 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.185303 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc45w\" (UniqueName: \"kubernetes.io/projected/01958f13-f4ba-4d92-bd7e-33fa3378f029-kube-api-access-jc45w\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.185325 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" 
(UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.185359 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.186096 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.193168 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.196863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.196888 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.196896 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.196906 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.196915 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.200384 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.206865 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.224719 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.234229 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.240687 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.248384 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.256289 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.263215 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.270358 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.286097 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc 
kubenswrapper[4693]: I1122 09:03:54.286136 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.286164 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc45w\" (UniqueName: \"kubernetes.io/projected/01958f13-f4ba-4d92-bd7e-33fa3378f029-kube-api-access-jc45w\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.286183 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.286647 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.286674 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.290353 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/01958f13-f4ba-4d92-bd7e-33fa3378f029-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.298775 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.298804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.298813 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.298824 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.298857 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.299140 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc45w\" (UniqueName: \"kubernetes.io/projected/01958f13-f4ba-4d92-bd7e-33fa3378f029-kube-api-access-jc45w\") pod \"ovnkube-control-plane-749d76644c-sxbhn\" (UID: \"01958f13-f4ba-4d92-bd7e-33fa3378f029\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.335785 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.400428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.400453 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.400461 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.400473 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.400481 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.502591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.502620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.502628 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.502641 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.502649 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.604485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.604519 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.604534 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.604547 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.604555 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.706231 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.706276 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.706287 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.706304 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.706319 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.754605 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.754638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.754647 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.754658 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.754666 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.762573 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.764824 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.764878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.764888 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.764901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.764910 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.772572 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.775169 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.775202 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.775212 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.775227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.775236 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.782875 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.785180 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.785214 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.785223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.785236 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.785244 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.792871 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.795065 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.795093 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.795102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.795113 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.795119 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.802511 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:54 crc kubenswrapper[4693]: E1122 09:03:54.802611 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.808367 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.808393 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.808402 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.808412 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.808420 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.910087 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.910118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.910127 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.910139 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:54 crc kubenswrapper[4693]: I1122 09:03:54.910147 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:54Z","lastTransitionTime":"2025-11-22T09:03:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.011482 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.011519 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.011531 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.011546 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.011556 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.089197 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-t4blm"] Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.089550 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: E1122 09:03:55.089606 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.098280 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.105567 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.112097 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.113053 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.113081 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.113089 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.113101 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.113109 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.119106 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.125972 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.138069 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.145179 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.153076 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.160901 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.167998 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.175329 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.184070 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.192151 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.193386 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.193416 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp9tg\" (UniqueName: \"kubernetes.io/projected/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-kube-api-access-dp9tg\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 
crc kubenswrapper[4693]: I1122 09:03:55.199625 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.206323 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.214232 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.214957 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.214985 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.214995 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.215007 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.215018 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.294659 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.294715 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp9tg\" (UniqueName: \"kubernetes.io/projected/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-kube-api-access-dp9tg\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: E1122 09:03:55.294752 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:55 crc kubenswrapper[4693]: E1122 09:03:55.294809 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:55.79479577 +0000 UTC m=+31.937298071 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.308640 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp9tg\" (UniqueName: \"kubernetes.io/projected/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-kube-api-access-dp9tg\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.316209 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.316241 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.316250 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.316262 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.316270 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.330926 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" event={"ID":"01958f13-f4ba-4d92-bd7e-33fa3378f029","Type":"ContainerStarted","Data":"faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.330966 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" event={"ID":"01958f13-f4ba-4d92-bd7e-33fa3378f029","Type":"ContainerStarted","Data":"b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.330976 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" event={"ID":"01958f13-f4ba-4d92-bd7e-33fa3378f029","Type":"ContainerStarted","Data":"5ff3893cf883a9416f629f69fcedf239cd5e826c15ed766426e1072b0eb0dc66"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.339192 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.346949 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.353340 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.359874 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc 
kubenswrapper[4693]: I1122 09:03:55.366998 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.378739 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.386795 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.394611 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.401764 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.409469 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.417959 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.417989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.417999 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.418011 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.418019 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.418666 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.426239 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.434623 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de8
1bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for 
client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.442346 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.448495 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.456055 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:03:55Z is after 2025-08-24T17:21:41Z" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.519950 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.519979 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.519988 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.519999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.520007 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.621222 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.621273 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.621283 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.621298 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.621306 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.722656 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.722681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.722691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.722712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.722743 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.799171 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:55 crc kubenswrapper[4693]: E1122 09:03:55.799266 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:55 crc kubenswrapper[4693]: E1122 09:03:55.799311 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:56.799299131 +0000 UTC m=+32.941801422 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.824620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.824647 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.824655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.824667 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.824676 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.926352 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.926385 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.926395 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.926410 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:55 crc kubenswrapper[4693]: I1122 09:03:55.926448 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:55Z","lastTransitionTime":"2025-11-22T09:03:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.028173 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.028210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.028218 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.028230 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.028239 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.130366 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.130395 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.130404 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.130414 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.130422 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.146734 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.146769 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:56 crc kubenswrapper[4693]: E1122 09:03:56.146828 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:56 crc kubenswrapper[4693]: E1122 09:03:56.146970 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.147329 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:56 crc kubenswrapper[4693]: E1122 09:03:56.147444 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.232433 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.232462 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.232471 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.232482 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.232493 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.333591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.333618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.333625 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.333637 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.333644 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.435766 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.435804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.435812 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.435825 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.435858 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.537457 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.537496 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.537506 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.537521 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.537531 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.639823 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.639880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.639891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.639910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.639920 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.742354 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.742559 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.742568 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.742581 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.742589 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.808689 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:56 crc kubenswrapper[4693]: E1122 09:03:56.808782 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:56 crc kubenswrapper[4693]: E1122 09:03:56.808822 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:03:58.808809804 +0000 UTC m=+34.951312095 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.843992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.844029 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.844040 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.844053 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.844066 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.945887 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.945925 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.945935 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.945948 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:56 crc kubenswrapper[4693]: I1122 09:03:56.945957 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:56Z","lastTransitionTime":"2025-11-22T09:03:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.047768 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.047813 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.047822 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.047853 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.047862 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.146332 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:57 crc kubenswrapper[4693]: E1122 09:03:57.146436 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.149301 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.149341 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.149352 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.149362 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.149370 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.250646 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.250681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.250689 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.250703 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.250713 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.352787 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.352814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.352822 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.352832 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.352864 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.454545 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.454600 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.454609 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.454618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.454626 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.556526 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.556562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.556570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.556583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.556592 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.658543 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.658570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.658580 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.658591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.658600 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.760964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.760985 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.760993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.761002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.761009 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.862919 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.862953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.862963 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.862975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.862984 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.918451 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.918559 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:57 crc kubenswrapper[4693]: E1122 09:03:57.918680 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:57 crc kubenswrapper[4693]: E1122 09:03:57.918700 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:04:13.918677281 +0000 UTC m=+50.061179601 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:03:57 crc kubenswrapper[4693]: E1122 09:03:57.918738 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-22 09:04:13.918724308 +0000 UTC m=+50.061226600 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.964749 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.964778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.964786 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.964799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:57 crc kubenswrapper[4693]: I1122 09:03:57.964808 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:57Z","lastTransitionTime":"2025-11-22T09:03:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.019872 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.019913 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.019933 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020007 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020027 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020045 4693 projected.go:288] Couldn't get configMap 
openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020057 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020070 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:14.020052571 +0000 UTC m=+50.162554862 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020012 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020087 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020093 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020088 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:14.020080684 +0000 UTC m=+50.162582975 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.020124 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:14.020115118 +0000 UTC m=+50.162617400 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.066516 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.066564 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.066574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.066587 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.066596 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.146049 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.146063 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.146106 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.146209 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.146272 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.146335 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.168595 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.168618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.168626 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.168636 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.168643 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.270789 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.270877 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.270895 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.270912 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.270924 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.372145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.372172 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.372181 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.372190 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.372196 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.474258 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.474284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.474293 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.474303 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.474312 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.575575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.575610 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.575621 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.575634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.575643 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.677001 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.677036 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.677043 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.677054 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.677063 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.778424 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.778454 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.778463 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.778475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.778484 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.826049 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.826149 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: E1122 09:03:58.826203 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:02.826188306 +0000 UTC m=+38.968690607 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.880495 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.880530 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.880538 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.880551 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.880559 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.982384 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.982416 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.982425 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.982438 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:58 crc kubenswrapper[4693]: I1122 09:03:58.982448 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:58Z","lastTransitionTime":"2025-11-22T09:03:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.084190 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.084213 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.084221 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.084231 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.084240 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.146103 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:03:59 crc kubenswrapper[4693]: E1122 09:03:59.146222 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.185855 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.185883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.185891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.185903 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.185911 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.287030 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.287065 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.287074 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.287087 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.287097 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.388431 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.388465 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.388473 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.388485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.388493 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.490111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.490140 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.490148 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.490158 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.490165 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.591526 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.591554 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.591562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.591574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.591582 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.693800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.693829 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.693856 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.693866 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.693874 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.795132 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.795162 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.795172 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.795182 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.795191 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.896893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.896921 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.896929 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.896940 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.896948 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.998725 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.998759 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.998770 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.998785 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:03:59 crc kubenswrapper[4693]: I1122 09:03:59.998794 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:03:59Z","lastTransitionTime":"2025-11-22T09:03:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.100170 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.100202 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.100210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.100222 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.100232 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.146436 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.146456 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.146460 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:00 crc kubenswrapper[4693]: E1122 09:04:00.146530 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:00 crc kubenswrapper[4693]: E1122 09:04:00.146635 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:00 crc kubenswrapper[4693]: E1122 09:04:00.146691 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.201919 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.201952 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.201961 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.201974 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.201983 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
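The three pod sync failures above all trace back to one condition: the runtime finds no CNI configuration under /etc/kubernetes/cni/net.d/, so NetworkReady stays false and kubelet refuses to create pod sandboxes. A minimal sketch of the readiness test the message implies (illustrative Python, not kubelet source; the directory comes from the log message, the file suffixes are an assumption):

    import glob, os

    CNI_CONF_DIR = "/etc/kubernetes/cni/net.d"  # directory named in the log message

    def network_ready(conf_dir=CNI_CONF_DIR):
        # The runtime reports NetworkReady=false until at least one CNI config
        # file shows up here (suffixes assumed for illustration).
        patterns = ("*.conf", "*.conflist", "*.json")
        return any(glob.glob(os.path.join(conf_dir, p)) for p in patterns)

    print("NetworkReady:", network_ready())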
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.232290 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.232994 4693 scope.go:117] "RemoveContainer" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5"
Nov 22 09:04:00 crc kubenswrapper[4693]: E1122 09:04:00.233181 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.304290 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.304320 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.304330 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.304342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.304350 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.406176 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.406209 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.406218 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.406229 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.406236 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
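The "back-off 10s restarting failed container" entry above is the first step of kubelet's crash-loop back-off. Assuming the usual kubelet defaults (10s initial delay, doubled per restart, capped at 5 minutes), the delay sequence looks like this sketch:

    def crashloop_delays(restarts, base=10.0, cap=300.0):
        # Assumed kubelet defaults: 10s initial back-off, doubled per crash,
        # capped at 5 minutes. The first value matches the "back-off 10s"
        # logged above for ovnkube-controller.
        delay = base
        for _ in range(restarts):
            yield delay
            delay = min(delay * 2.0, cap)

    print(list(crashloop_delays(7)))  # [10.0, 20.0, 40.0, 80.0, 160.0, 300.0, 300.0]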
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.507741 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.507798 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.507806 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.507818 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.507827 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.609673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.609710 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.609718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.609730 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.609742 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.711147 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.711181 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.711189 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.711201 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.711210 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.812685 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.812723 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.812732 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.812754 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.812762 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.915038 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.915070 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.915078 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.915090 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:00 crc kubenswrapper[4693]: I1122 09:04:00.915098 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:00Z","lastTransitionTime":"2025-11-22T09:04:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.016609 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.016638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.016647 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.016659 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.016667 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.118704 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.118748 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.118757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.118771 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.118787 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.146079 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:01 crc kubenswrapper[4693]: E1122 09:04:01.146165 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.220261 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.220288 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.220299 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.220315 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.220326 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.322353 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.322385 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.322394 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.322406 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.322416 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.423621 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.423678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.423689 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.423705 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.423716 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.525652 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.525683 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.525692 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.525704 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.525710 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.627601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.627624 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.627632 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.627641 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.627648 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.729458 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.729495 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.729506 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.729520 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.729529 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.831042 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.831073 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.831083 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.831096 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.831104 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.932886 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.932923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.932933 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.932946 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:01 crc kubenswrapper[4693]: I1122 09:04:01.932956 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:01Z","lastTransitionTime":"2025-11-22T09:04:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.034557 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.034590 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.034598 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.034610 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.034620 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.135659 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.135692 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.135703 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.135717 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.135726 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.146046 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.146103 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.146125 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:02 crc kubenswrapper[4693]: E1122 09:04:02.146169 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:02 crc kubenswrapper[4693]: E1122 09:04:02.146212 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:02 crc kubenswrapper[4693]: E1122 09:04:02.146255 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.238358 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.238389 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.238398 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.238410 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.238420 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.340223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.340257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.340264 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.340276 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.340285 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.442058 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.442101 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.442109 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.442121 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.442129 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.544181 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.544216 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.544225 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.544238 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.544247 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.646270 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.646301 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.646310 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.646324 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.646333 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.749821 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.749916 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.749927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.749943 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.749953 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.851185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.851214 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.851223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.851234 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.851241 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.861925 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:02 crc kubenswrapper[4693]: E1122 09:04:02.862083 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 09:04:02 crc kubenswrapper[4693]: E1122 09:04:02.862149 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:10.862136102 +0000 UTC m=+47.004638393 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.952760 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.952791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.952799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.952810 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:02 crc kubenswrapper[4693]: I1122 09:04:02.952817 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:02Z","lastTransitionTime":"2025-11-22T09:04:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
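The nestedpendingoperations entry above encodes when the failed mount may be retried (durationBeforeRetry 8s, deadline 09:04:10). A small parser for that phrasing, keyed only to the wording visible in this log:

    import re

    # Wording copied from the nestedpendingoperations entry above (shortened).
    line = ('Operation failed. No retries permitted until '
            '2025-11-22 09:04:10.862136102 +0000 UTC m=+47.004638393 '
            '(durationBeforeRetry 8s). Error: MountVolume.SetUp failed')

    m = re.search(r'No retries permitted until (\S+ \S+).*?\(durationBeforeRetry (\w+)\)', line)
    if m:
        deadline, backoff = m.groups()
        print(f'next retry at {deadline} (back-off {backoff})')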
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.054258 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.054302 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.054312 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.054327 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.054338 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.146734 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:03 crc kubenswrapper[4693]: E1122 09:04:03.146824 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.155747 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.155774 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.155784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.155794 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.155803 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.257599 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.257636 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.257646 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.257662 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.257671 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.359892 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.359927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.359936 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.359949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.359957 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.461798 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.461832 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.461869 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.461884 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.461893 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.564153 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.564177 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.564185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.564196 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.564204 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.666141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.666174 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.666183 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.666194 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.666202 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.768345 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.768378 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.768387 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.768400 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.768408 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.869934 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.869969 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.869979 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.869992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.870001 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.971644 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.971677 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.971685 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.971698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:03 crc kubenswrapper[4693]: I1122 09:04:03.971706 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:03Z","lastTransitionTime":"2025-11-22T09:04:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.072973 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.073005 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.073013 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.073025 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.073034 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.145741 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.145782 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.145799 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:04 crc kubenswrapper[4693]: E1122 09:04:04.145897 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:04 crc kubenswrapper[4693]: E1122 09:04:04.145992 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:04 crc kubenswrapper[4693]: E1122 09:04:04.146043 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.154197 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.161371 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.167095 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\
\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.173358 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.174234 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.174259 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.174267 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.174279 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.174288 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.181213 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.192386 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,
\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.199032 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.206454 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.216113 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.224323 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.232161 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.241156 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.249999 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.257531 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.263809 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.271467 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.275657 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.275682 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.275690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.275702 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.275711 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.377220 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.377247 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.377255 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.377266 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.377275 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.479604 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.479638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.479646 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.479660 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.479668 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.581548 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.581597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.581606 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.581619 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.581627 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.683219 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.683248 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.683257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.683268 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.683276 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.784915 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.784947 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.784955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.784965 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.784975 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.886895 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.886920 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.886934 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.886944 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.886952 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.983417 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.983452 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.983460 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.983472 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.983481 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:04 crc kubenswrapper[4693]: E1122 09:04:04.992359 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:04Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.995077 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.995102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.995111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.995120 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:04 crc kubenswrapper[4693]: I1122 09:04:04.995127 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:04Z","lastTransitionTime":"2025-11-22T09:04:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.007175 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.013246 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.013277 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.013285 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.013298 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.013307 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.028123 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.035999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.036034 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.036044 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.036060 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.036069 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.045461 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.047829 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.047887 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
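
Every failed status patch in this stretch of the log shares one root cause: the serving certificate presented by the "node.network-node-identity.openshift.io" webhook at https://127.0.0.1:9743 expired on 2025-08-24T17:21:41Z, while the node clock reads 2025-11-22. The minimal Go sketch below is an illustrative diagnostic only, not kubelet code: it dials the webhook port from the node, assuming the endpoint is locally reachable, and prints each peer certificate's validity window.

package main

import (
    "crypto/tls"
    "fmt"
    "time"
)

func main() {
    // Dial the webhook endpoint named in the log. Verification is skipped
    // deliberately: the goal is to inspect the certificate that the normal
    // TLS handshake rejects as expired.
    conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
    if err != nil {
        fmt.Println("dial failed:", err)
        return
    }
    defer conn.Close()

    now := time.Now()
    for _, cert := range conn.ConnectionState().PeerCertificates {
        fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
            cert.Subject.String(),
            cert.NotBefore.Format(time.RFC3339),
            cert.NotAfter.Format(time.RFC3339),
            now.After(cert.NotAfter))
    }
}

An expired=true line for the leaf certificate matches the x509 error in the entries above; the fix is certificate rotation (or correcting the node clock, if that is what drifted), which is outside the scope of this sketch.
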
event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.047898 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.047910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.047919 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.056227 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:05Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.056334 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.057300 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
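
The 09:04:05.056334 entry closes a bounded retry loop: the kubelet re-attempts the node status patch a fixed number of times per sync cycle, then gives up with "Unable to update node status" until the next cycle. The sketch below illustrates that pattern only; tryPatchNodeStatus is a hypothetical stand-in for the real patch call, and the limit of 5 is an assumption based on the upstream kubelet's nodeStatusUpdateRetry constant.

package main

import (
    "errors"
    "fmt"
)

// Assumed to mirror the upstream kubelet constant nodeStatusUpdateRetry.
const nodeStatusUpdateRetry = 5

// tryPatchNodeStatus is a hypothetical stand-in for the real status patch;
// in this log it would fail every time with the expired-certificate error.
func tryPatchNodeStatus() error {
    return errors.New("x509: certificate has expired or is not yet valid")
}

func updateNodeStatus() error {
    for i := 0; i < nodeStatusUpdateRetry; i++ {
        err := tryPatchNodeStatus()
        if err == nil {
            return nil
        }
        fmt.Printf("Error updating node status, will retry: %v\n", err)
    }
    return errors.New("update node status exceeds retry count")
}

func main() {
    if err := updateNodeStatus(); err != nil {
        fmt.Println("Unable to update node status:", err)
    }
}

Because every attempt hits the same expired certificate, the loop exhausts all attempts within about 20 ms, which is exactly the cadence of the retried error entries above.
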
event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.057344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.057352 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.057364 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.057373 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.146692 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:05 crc kubenswrapper[4693]: E1122 09:04:05.146821 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.158927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.158958 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.158967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.158980 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.158989 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.260423 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.260456 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.260464 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.260475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.260483 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.362316 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.362353 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.362363 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.362376 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.362384 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.464182 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.464208 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.464215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.464227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.464236 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.566512 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.566562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.566577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.566596 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.566612 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.668599 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.668631 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.668655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.668675 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.668684 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.770467 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.770493 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.770500 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.770509 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.770517 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.872535 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.872569 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.872578 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.872591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.872599 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.974811 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.974867 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.974877 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.974890 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:05 crc kubenswrapper[4693]: I1122 09:04:05.974898 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:05Z","lastTransitionTime":"2025-11-22T09:04:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.076442 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.076472 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.076481 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.076490 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.076496 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
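
Separately from the webhook failure, every NotReady heartbeat in this log carries the same message: no CNI configuration file under /etc/kubernetes/cni/net.d/. The sketch below approximates that readiness check under the assumption that a usable network config is any *.conf, *.conflist, or *.json file in the directory the log names; the real kubelet/CRI-O probe also parses and validates the files.

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

func main() {
    confDir := "/etc/kubernetes/cni/net.d" // directory named in the log entries
    entries, err := os.ReadDir(confDir)
    if err != nil {
        fmt.Println("cannot read CNI conf dir:", err)
        return
    }
    var configs []string
    for _, e := range entries {
        if e.IsDir() {
            continue
        }
        // Common CNI config extensions; an assumption for this sketch.
        switch strings.ToLower(filepath.Ext(e.Name())) {
        case ".conf", ".conflist", ".json":
            configs = append(configs, e.Name())
        }
    }
    if len(configs) == 0 {
        fmt.Println("no CNI configuration file found; network plugin not ready")
        return
    }
    fmt.Println("CNI configs present:", configs)
}

Until the cluster network provider writes a configuration into that directory, the Ready condition stays False, and pods that need pod networking, such as the network-metrics-daemon and network-check pods in the entries that follow, cannot get sandboxes.
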
Has your network provider started?"} Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.145813 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.145890 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.145970 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:06 crc kubenswrapper[4693]: E1122 09:04:06.145968 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:06 crc kubenswrapper[4693]: E1122 09:04:06.146049 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:06 crc kubenswrapper[4693]: E1122 09:04:06.146140 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.178110 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.178137 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.178145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.178155 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.178162 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.279873 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.279904 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.279913 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.279923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.279930 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.381862 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.381901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.381913 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.381927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.381944 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.483838 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.483908 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.483917 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.483929 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.483937 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.585624 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.585655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.585663 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.585675 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:06 crc kubenswrapper[4693]: I1122 09:04:06.585684 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:06Z","lastTransitionTime":"2025-11-22T09:04:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.096002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.096045 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.096056 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.096066 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.096074 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:07Z","lastTransitionTime":"2025-11-22T09:04:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:07 crc kubenswrapper[4693]: I1122 09:04:07.146644 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:07 crc kubenswrapper[4693]: E1122 09:04:07.146741 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.011805 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.011838 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.011862 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.011883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.011893 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:08Z","lastTransitionTime":"2025-11-22T09:04:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.146199 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.146253 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:08 crc kubenswrapper[4693]: I1122 09:04:08.146279 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:08 crc kubenswrapper[4693]: E1122 09:04:08.146366 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:08 crc kubenswrapper[4693]: E1122 09:04:08.146444 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:08 crc kubenswrapper[4693]: E1122 09:04:08.146505 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.031905 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.032056 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.032120 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.032183 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.032241 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:09Z","lastTransitionTime":"2025-11-22T09:04:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:09 crc kubenswrapper[4693]: I1122 09:04:09.146587 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:09 crc kubenswrapper[4693]: E1122 09:04:09.146779 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.049860 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.049882 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.049897 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.049906 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.049913 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:10Z","lastTransitionTime":"2025-11-22T09:04:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.145868 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:10 crc kubenswrapper[4693]: E1122 09:04:10.145949 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.145870 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.145973 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:10 crc kubenswrapper[4693]: E1122 09:04:10.146003 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:10 crc kubenswrapper[4693]: E1122 09:04:10.146044 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:10 crc kubenswrapper[4693]: I1122 09:04:10.930463 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:10 crc kubenswrapper[4693]: E1122 09:04:10.930578 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 09:04:10 crc kubenswrapper[4693]: E1122 09:04:10.930645 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:26.93062716 +0000 UTC m=+63.073129461 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.066690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.066715 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.066723 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.066733 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.066742 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.146687 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:11 crc kubenswrapper[4693]: E1122 09:04:11.146773 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Has your network provider started?"} Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.270453 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.270486 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.270496 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.270508 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.270517 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.372542 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.372575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.372583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.372594 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.372607 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.474992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.475020 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.475027 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.475037 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.475044 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.577438 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.577468 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.577478 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.577496 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.577507 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.679791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.679825 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.679834 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.679863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.679873 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.781401 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.781437 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.781445 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.781458 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.781469 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.882765 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.882797 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.882805 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.882818 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.882826 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.984329 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.984356 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.984365 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.984374 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:11 crc kubenswrapper[4693]: I1122 09:04:11.984381 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:11Z","lastTransitionTime":"2025-11-22T09:04:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.085938 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.085973 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.085981 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.085993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.086004 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
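[editor's note] Each setters.go:603 record embeds the node's Ready condition as inline JSON, repeated roughly every 100ms. When scanning a long capture like this, it can be easier to decode those objects than to eyeball them; a small sketch using only encoding/json, with struct fields mirroring exactly the keys visible in the records above:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the condition={...} payload in the setters.go:603
// records: type, status, heartbeat/transition times, reason and message.
type nodeCondition struct {
	Type               string    `json:"type"`
	Status             string    `json:"status"`
	LastHeartbeatTime  time.Time `json:"lastHeartbeatTime"`
	LastTransitionTime time.Time `json:"lastTransitionTime"`
	Reason             string    `json:"reason"`
	Message            string    `json:"message"`
}

func main() {
	// Sample payload copied from one of the log records above.
	raw := `{"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}`

	var c nodeCondition
	if err := json.Unmarshal([]byte(raw), &c); err != nil {
		fmt.Println("decode:", err)
		return
	}
	fmt.Printf("%s=%s since %s (%s)\n", c.Type, c.Status,
		c.LastTransitionTime.Format(time.RFC3339), c.Reason)
}

The RFC 3339 timestamps unmarshal directly into time.Time, so transition times across records can be compared programmatically.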
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.146517 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.146567 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:12 crc kubenswrapper[4693]: E1122 09:04:12.146602 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:12 crc kubenswrapper[4693]: E1122 09:04:12.146676 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.146716 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:12 crc kubenswrapper[4693]: E1122 09:04:12.146807 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.187987 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.188014 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.188024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.188035 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.188043 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.289613 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.289655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.289666 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.289680 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.289690 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.391222 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.391259 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.391269 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.391283 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.391294 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.494561 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.494594 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.494603 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.494615 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.494625 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.596497 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.596528 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.596536 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.596548 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.596557 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.698515 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.698545 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.698553 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.698582 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.698593 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.800678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.800861 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.800940 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.801006 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.801069 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.903004 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.903036 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.903047 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.903059 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:12 crc kubenswrapper[4693]: I1122 09:04:12.903067 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:12Z","lastTransitionTime":"2025-11-22T09:04:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.004525 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.004573 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.004583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.004601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.004611 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.106424 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.106454 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.106462 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.106472 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.106480 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.146477 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:13 crc kubenswrapper[4693]: E1122 09:04:13.146574 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.147000 4693 scope.go:117] "RemoveContainer" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.208612 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.208750 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.208758 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.208771 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.208779 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.310816 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.310880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.310892 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.310903 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.310912 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.370523 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/1.log" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.372469 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.372802 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.383733 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa2
98bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync 
for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z"
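[editor's note] From here on, every status patch fails the same way: the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z. A quick way to confirm that independently of the kubelet is to pull the peer certificate and compare its validity window against the clock; a sketch using Go's crypto/tls (the address comes from the log; InsecureSkipVerify is deliberate so the handshake completes even with the expired chain):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint taken from the failed Post in the log records.
	addr := "127.0.0.1:9743"

	// Skip verification on purpose: we want the expired certificate back,
	// not a handshake failure.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	now := time.Now()
	fmt.Printf("subject:   %s\n", cert.Subject)
	fmt.Printf("notBefore: %s\n", cert.NotBefore.Format(time.RFC3339))
	fmt.Printf("notAfter:  %s\n", cert.NotAfter.Format(time.RFC3339))
	if now.After(cert.NotAfter) {
		// This is the condition the kubelet surfaces as
		// "certificate has expired or is not yet valid".
		fmt.Println("certificate is expired relative to", now.Format(time.RFC3339))
	}
}

A NotAfter in the past here would match the x509 error repeated in each of the failed patch records below.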
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.408258 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.412729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.412760 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.412769 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.412782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.412789 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.425611 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.437753 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.445269 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.451611 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.458337 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc 
kubenswrapper[4693]: I1122 09:04:13.465743 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.478280 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses
\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.486180 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.494873 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.502334 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.510527 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.515074 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.515102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.515110 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.515122 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.515136 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.520907 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.528060 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:13Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.617170 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.617202 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.617210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.617224 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.617254 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.719316 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.719342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.719350 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.719363 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.719371 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.821673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.821716 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.821725 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.821737 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.821747 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.923544 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.923574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.923584 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.923597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.923607 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:13Z","lastTransitionTime":"2025-11-22T09:04:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.953997 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:04:13 crc kubenswrapper[4693]: E1122 09:04:13.954106 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:04:45.954084881 +0000 UTC m=+82.096587182 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:04:13 crc kubenswrapper[4693]: I1122 09:04:13.954176 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:13 crc kubenswrapper[4693]: E1122 09:04:13.954288 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:04:13 crc kubenswrapper[4693]: E1122 09:04:13.954331 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:45.954322851 +0000 UTC m=+82.096825152 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.024773 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.024795 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.024804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.024816 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.024825 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.055279 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.055322 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.055343 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055433 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055446 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055455 4693 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055462 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055480 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055486 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:46.05547512 +0000 UTC m=+82.197977411 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055490 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055519 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:46.055510096 +0000 UTC m=+82.198012387 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055553 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.055577 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:46.055569939 +0000 UTC m=+82.198072240 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.126780 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.126801 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.126809 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.126819 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.126826 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.145708 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.145749 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.145756 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.145820 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.146006 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.146078 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.154966 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.166791 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e
5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: 
UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses
\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.174824 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.183246 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.190469 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.198248 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.206731 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.214052 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.227348 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.228267 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.228289 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.228298 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.228310 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.228318 4693 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.235955 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.242469 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.250278 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.258059 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.265496 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.272803 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.280176 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc 
kubenswrapper[4693]: I1122 09:04:14.329534 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.329569 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.329579 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.329591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.329601 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.376226 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/2.log" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.376763 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/1.log" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.379174 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2" exitCode=1 Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.379202 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.379235 4693 scope.go:117] "RemoveContainer" containerID="87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.379660 4693 scope.go:117] "RemoveContainer" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2" Nov 22 09:04:14 crc kubenswrapper[4693]: E1122 09:04:14.379793 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.386650 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.393786 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.401477 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.410553 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.418182 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.430508 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff47
7fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://87fa1665304e514c497a1b2de305624a383c33c31a25d50806b913d8c073cde5\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:03:50Z\\\",\\\"message\\\":\\\"ort{ServicePort{Name:http,Protocol:TCP,Port:80,TargetPort:{0 8080 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{app: console,component: downloads,},ClusterIP:10.217.4.213,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.213],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI1122 09:03:50.889653 6103 model_client.go:382] Update operations generated as: [{Op:update Table:Load_Balancer Row:map[external_ids:{GoMap:map[k8s.ovn.org/kind:Service k8s.ovn.org/owner:default/kubernetes]} name:Service_default/kubernetes_TCP_node_switch_crc options:{GoMap:map[event:false hairpin_snat_ip:169.254.0.5 fd69::5 neighbor_responder:none reject:true skip_snat:false]} protocol:{GoSet:[tcp]} selection_fields:{GoSet:[]} vips:{GoMap:map[10.217.4.1:443:192.168.126.11:6443]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {b21188fe-5483-4717-afe6-20a41a40b91a}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:50Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed 
attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.431088 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.431114 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.431123 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.431134 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.431141 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.437767 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.445549 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.454273 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.461321 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.469608 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.477401 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.484855 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.491149 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.499493 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":
\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.507700 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiser
ver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:14Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.533095 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.533131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.533140 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.533154 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.533165 4693 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.635208 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.635238 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.635245 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.635257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.635265 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.737234 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.737266 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.737274 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.737286 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.737294 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.839060 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.839092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.839101 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.839116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.839125 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.940955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.941003 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.941012 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.941024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:14 crc kubenswrapper[4693]: I1122 09:04:14.941033 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:14Z","lastTransitionTime":"2025-11-22T09:04:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.042716 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.042760 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.042768 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.042782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.042791 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.144688 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.144722 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.144731 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.144744 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.144755 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.145899 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.145993 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.246169 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.246262 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.246270 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.246281 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.246288 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.274823 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.274873 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.274881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.274893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.274903 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.283538 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.285879 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.285918 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.285934 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.285964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.285978 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.294359 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.296502 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.296529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.296538 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.296567 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.296575 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.304299 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.306464 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.306563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.306629 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.306691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.306752 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.314482 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.316484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.316575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.316638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.316701 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.316751 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.323888 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:15Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.324001 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.348078 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.348400 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.348456 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.348511 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.348577 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.382401 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/2.log" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.384896 4693 scope.go:117] "RemoveContainer" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2" Nov 22 09:04:15 crc kubenswrapper[4693]: E1122 09:04:15.385021 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.393025 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.400353 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.407033 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.413789 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc 
kubenswrapper[4693]: I1122 09:04:15.420431 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.431536 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"
recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257
453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.440047 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.448625 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.449767 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.449800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.449808 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.449820 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.449828 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.456360 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.463869 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.472591 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed 
to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1
688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\"
:\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.479454 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.487780 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.495388 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.501527 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.509144 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:15Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.551505 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.551533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.551542 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.551554 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.551562 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.657185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.657217 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.657251 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.657265 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.657274 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.759287 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.759324 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.759333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.759343 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.759350 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.861168 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.861200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.861209 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.861223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.861232 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.963249 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.963285 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.963295 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.963308 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:15 crc kubenswrapper[4693]: I1122 09:04:15.963317 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:15Z","lastTransitionTime":"2025-11-22T09:04:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.065539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.065571 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.065581 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.065593 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.065602 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.146769 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.146801 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:16 crc kubenswrapper[4693]: E1122 09:04:16.146969 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:16 crc kubenswrapper[4693]: E1122 09:04:16.147053 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.147265 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:16 crc kubenswrapper[4693]: E1122 09:04:16.147505 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.168090 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.168199 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.168283 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.168339 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.168388 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.269924 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.270019 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.270087 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.270141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.270188 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.371531 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.371562 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.371570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.371582 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.371590 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.473706 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.473732 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.473742 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.473753 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.473762 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.575588 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.575705 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.575771 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.575836 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.575924 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.677328 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.677357 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.677366 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.677377 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.677385 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.779215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.779246 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.779254 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.779265 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.779275 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.880988 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.881121 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.881192 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.881275 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.881329 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.982552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.982580 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.982589 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.982598 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:16 crc kubenswrapper[4693]: I1122 09:04:16.982606 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:16Z","lastTransitionTime":"2025-11-22T09:04:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.084655 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.084675 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.084683 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.084693 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.084700 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.146382 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:17 crc kubenswrapper[4693]: E1122 09:04:17.146473 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.186320 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.186412 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.186471 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.186532 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.186590 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.287935 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.287990 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.288002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.288015 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.288023 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.389690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.389708 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.389718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.389729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.389738 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.491038 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.491071 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.491083 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.491094 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.491103 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.593029 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.593108 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.593118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.593132 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.593139 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.695506 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.695530 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.695537 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.695547 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.695555 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.797642 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.797673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.797681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.797691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.797700 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.899408 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.899440 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.899447 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.899459 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:17 crc kubenswrapper[4693]: I1122 09:04:17.899466 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:17Z","lastTransitionTime":"2025-11-22T09:04:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.001469 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.001503 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.001511 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.001523 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.001533 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.102751 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.102782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.102791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.102802 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.102810 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.146201 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.146246 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:18 crc kubenswrapper[4693]: E1122 09:04:18.146322 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.146371 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:18 crc kubenswrapper[4693]: E1122 09:04:18.146481 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:18 crc kubenswrapper[4693]: E1122 09:04:18.146607 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.204317 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.204347 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.204358 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.204368 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.204380 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.306284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.306310 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.306317 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.306328 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.306336 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.407909 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.407930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.407938 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.407947 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.407954 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.509523 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.509550 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.509560 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.509587 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.509595 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.610712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.610811 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.610896 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.610955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.611038 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.712491 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.712539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.712552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.712570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.712592 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.814734 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.814786 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.814793 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.814816 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.814823 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.916875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.916916 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.916930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.916945 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:18 crc kubenswrapper[4693]: I1122 09:04:18.916956 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:18Z","lastTransitionTime":"2025-11-22T09:04:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.018294 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.018320 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.018329 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.018340 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.018350 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.119795 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.119823 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.119832 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.119863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.119873 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.146112 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:19 crc kubenswrapper[4693]: E1122 09:04:19.146207 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.220877 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.220902 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.220911 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.220921 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.220929 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.323098 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.323172 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.323195 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.323222 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.323232 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.425313 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.425338 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.425347 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.425356 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.425365 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.527293 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.527347 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.527356 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.527367 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.527377 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.629520 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.629591 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.629601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.629613 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.629621 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.661672 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.668054 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.669886 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\
\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.682657 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-1
1-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn
/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] 
Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn
\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.692246 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.700331 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.708021 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.715956 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.724941 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.731533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.731561 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.731569 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.731581 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.731590 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.732429 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:0
3:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.741158 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 
09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.748654 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.754920 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.762609 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.769735 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.776815 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.782817 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.789007 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:19Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.833316 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.833341 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.833405 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.833416 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.833439 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.935393 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.935420 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.935427 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.935439 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:19 crc kubenswrapper[4693]: I1122 09:04:19.935446 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:19Z","lastTransitionTime":"2025-11-22T09:04:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.036638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.036680 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.036688 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.036698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.036706 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.141092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.141121 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.141131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.141307 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.141330 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.145803 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.145826 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:20 crc kubenswrapper[4693]: E1122 09:04:20.145902 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.145954 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:20 crc kubenswrapper[4693]: E1122 09:04:20.146085 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:20 crc kubenswrapper[4693]: E1122 09:04:20.146136 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.243420 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.243475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.243485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.243500 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.243511 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.345145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.345171 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.345179 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.345189 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.345198 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.446687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.446714 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.446751 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.446764 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.446772 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.548630 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.548654 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.548662 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.548678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.548686 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.650551 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.650577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.650604 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.650614 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.650621 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.752695 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.752718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.752725 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.752734 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.752761 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.854925 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.854952 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.854960 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.854974 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.854982 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.958091 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.958113 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.958121 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.958132 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:20 crc kubenswrapper[4693]: I1122 09:04:20.958140 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:20Z","lastTransitionTime":"2025-11-22T09:04:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.059335 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.059361 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.059369 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.059381 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.059389 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.146151 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:21 crc kubenswrapper[4693]: E1122 09:04:21.146255 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.161015 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.161041 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.161049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.161060 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.161067 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.262885 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.262916 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.262925 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.262937 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.262946 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.364302 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.364354 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.364368 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.364384 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.364397 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.465518 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.465545 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.465552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.465561 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.465568 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.567325 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.567354 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.567362 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.567372 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.567382 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.669161 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.669187 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.669196 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.669206 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.669215 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.770678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.770705 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.770714 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.770727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.770739 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.872474 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.872522 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.872533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.872547 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.872558 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.974350 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.974375 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.974383 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.974392 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:21 crc kubenswrapper[4693]: I1122 09:04:21.974402 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:21Z","lastTransitionTime":"2025-11-22T09:04:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.076284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.076319 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.076327 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.076339 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.076363 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.146447 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:22 crc kubenswrapper[4693]: E1122 09:04:22.146540 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.146643 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.146683 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:22 crc kubenswrapper[4693]: E1122 09:04:22.146747 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:22 crc kubenswrapper[4693]: E1122 09:04:22.146891 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.178192 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.178231 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.178242 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.178253 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.178260 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.280023 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.280052 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.280061 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.280072 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.280081 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.381776 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.381803 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.381812 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.381822 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.381830 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.483810 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.483880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.483893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.483910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.483926 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.585511 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.585539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.585548 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.585559 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.585566 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.687094 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.687123 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.687131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.687141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.687147 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.788529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.788555 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.788563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.788571 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.788579 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.890427 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.890459 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.890469 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.890485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.890495 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.991757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.991791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.991799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.991812 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:22 crc kubenswrapper[4693]: I1122 09:04:22.991821 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:22Z","lastTransitionTime":"2025-11-22T09:04:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.093784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.093819 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.093829 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.093856 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.093866 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.145928 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:23 crc kubenswrapper[4693]: E1122 09:04:23.146047 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.196085 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.196129 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.196138 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.196151 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.196159 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.298185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.298217 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.298227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.298239 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.298248 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.400075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.400111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.400120 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.400134 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.400143 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.502652 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.502685 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.502694 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.502706 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.502714 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.604153 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.604187 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.604195 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.604208 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.604216 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.705989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.706047 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.706057 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.706075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.706084 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.807636 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.807667 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.807677 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.807687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.807695 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.909452 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.909475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.909484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.909493 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:23 crc kubenswrapper[4693]: I1122 09:04:23.909502 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:23Z","lastTransitionTime":"2025-11-22T09:04:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.011199 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.011230 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.011240 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.011253 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.011262 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.112261 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.112289 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.112297 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.112308 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.112316 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.146482 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.146533 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:24 crc kubenswrapper[4693]: E1122 09:04:24.146597 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.146482 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:24 crc kubenswrapper[4693]: E1122 09:04:24.146701 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:24 crc kubenswrapper[4693]: E1122 09:04:24.146800 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.157211 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09
:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.166200 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.173113 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.180589 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.189068 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.195743 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.203865 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.211047 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.213782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.213802 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.213809 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.213821 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.213830 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.217546 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.225398 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.232698 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.239918 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.246077 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.252661 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.259687 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.266409 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.278097 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff47
7fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:24Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315434 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315608 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315747 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.315747 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.417444 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.417471 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.417480 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.417492 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.417501 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.519550 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.519963 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.520042 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.520107 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.520175 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.621718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.621748 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.621757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.621770 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.621777 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.727436 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.727479 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.727488 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.727501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.727510 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.829604 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.829665 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.829675 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.829700 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.829717 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.931937 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.931984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.931993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.932009 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:24 crc kubenswrapper[4693]: I1122 09:04:24.932022 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:24Z","lastTransitionTime":"2025-11-22T09:04:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.034052 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.034105 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.034116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.034133 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.034143 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.136590 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.136636 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.136647 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.136667 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.136683 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.146037 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:25 crc kubenswrapper[4693]: E1122 09:04:25.146182 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.239215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.239258 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.239268 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.239285 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.239303 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.342060 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.342097 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.342107 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.342124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
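Every "Node became not ready" condition above carries the same root message: the runtime's CNI readiness probe found no network configuration under /etc/kubernetes/cni/net.d/. A rough sketch of that kind of probe, assuming the conventional libcni file extensions (.conf, .conflist, .json) — an assumption, since the log only shows the directory, not the matching rules:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// cniConfFiles lists candidate CNI config files in dir. The extension
// set mirrors common libcni behavior and is an assumption here.
func cniConfFiles(dir string) ([]string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var files []string
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch strings.ToLower(filepath.Ext(e.Name())) {
		case ".conf", ".conflist", ".json":
			files = append(files, filepath.Join(dir, e.Name()))
		}
	}
	return files, nil
}

func main() {
	// Directory taken from the log message above.
	files, err := cniConfFiles("/etc/kubernetes/cni/net.d/")
	if err != nil || len(files) == 0 {
		fmt.Println("NetworkReady=false: no CNI configuration file found")
		return
	}
	fmt.Println("CNI configs:", files)
}

Until ovn-kubernetes (the network provider here) writes its config into that directory, a probe like this keeps failing and the Ready condition stays False, which is exactly the loop the log shows.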
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.342135 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.443638 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.443680 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.443690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.443703 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.443716 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.545281 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.545313 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.545324 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.545336 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.545348 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.647426 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.647461 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.647470 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.647485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.647496 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.718441 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.718469 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.718477 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.718490 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.718499 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: E1122 09:04:25.727071 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.729721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.729737 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.729744 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.729756 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.729764 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.741642 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
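The body the kubelet keeps retrying in the surrounding entries is a strategic-merge patch against the node's status: $setElementOrder/conditions pins the ordering of the conditions list, while each condition object is merged into the existing list by its type key. A trimmed sketch of assembling such a patch body (field values copied from the log; the helper program itself is illustrative):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Minimal shape of the node-status patch seen in the log, omitting
	// the allocatable/capacity/images sections for brevity.
	patch := map[string]any{
		"status": map[string]any{
			// Pins the relative order of list elements keyed by "type".
			"$setElementOrder/conditions": []map[string]string{
				{"type": "MemoryPressure"},
				{"type": "DiskPressure"},
				{"type": "PIDPressure"},
				{"type": "Ready"},
			},
			// Each entry is merged into the existing condition of the
			// same "type"; only changed fields need to appear.
			"conditions": []map[string]string{
				{
					"type":   "Ready",
					"status": "False",
					"reason": "KubeletNotReady",
				},
			},
		},
	}
	out, _ := json.MarshalIndent(patch, "", "  ")
	fmt.Println(string(out))
	// A body like this goes out as PATCH .../nodes/crc/status with a
	// strategic-merge-patch content type; that request is what the
	// node.network-node-identity webhook intercepts and rejects here.
}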
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.741670 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.741679 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.741690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.741698 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.751966 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
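Since every webhook call above dies against https://127.0.0.1:9743, one plausible way to confirm the expiry from the node itself is to dial that endpoint and read the served certificate's validity window without verifying the chain. A hypothetical diagnostic along those lines (the address is the one in the log; everything else is an assumption):

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Skip verification deliberately: the point is to read the dates
	// even though verification would fail, not to trust the peer.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{
		InsecureSkipVerify: true,
	})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()
	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%s notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject,
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			time.Now().After(cert.NotAfter))
	}
}

Against this cluster one would expect notAfter=2025-08-24T17:21:41Z and expired=true, matching the webhook errors in the log.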
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.752015 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.752026 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.752053 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.752065 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:25 crc kubenswrapper[4693]: E1122 09:04:25.759778 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.762635 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.762687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.762698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.762713 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.762722 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:25 crc kubenswrapper[4693]: E1122 09:04:25.770440 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:25Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:25 crc kubenswrapper[4693]: E1122 09:04:25.770654 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.771820 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.771863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.771876 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.771890 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.771903 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.873439 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.873476 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.873485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.873499 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.873510 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.976178 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.976215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.976224 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.976240 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:25 crc kubenswrapper[4693]: I1122 09:04:25.976250 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:25Z","lastTransitionTime":"2025-11-22T09:04:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.078630 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.078665 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.078673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.078687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.078696 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.146583 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:26 crc kubenswrapper[4693]: E1122 09:04:26.146710 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.146883 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:26 crc kubenswrapper[4693]: E1122 09:04:26.146964 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.147168 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:26 crc kubenswrapper[4693]: E1122 09:04:26.147219 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.180185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.180212 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.180221 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.180232 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.180243 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.283087 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.283130 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.283139 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.283155 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.283166 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.384979 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.385016 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.385024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.385035 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.385054 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.486903 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.486945 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.486953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.486967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.486978 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.588860 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.589167 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.589234 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.589303 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.589357 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.690999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.691056 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.691066 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.691077 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.691084 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.793011 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.793059 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.793067 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.793076 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.793083 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.894883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.895027 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.895119 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.895200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.895275 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.962509 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:26 crc kubenswrapper[4693]: E1122 09:04:26.962666 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:04:26 crc kubenswrapper[4693]: E1122 09:04:26.962723 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:04:58.962709487 +0000 UTC m=+95.105211777 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.997113 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.997140 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.997149 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.997162 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:26 crc kubenswrapper[4693]: I1122 09:04:26.997171 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:26Z","lastTransitionTime":"2025-11-22T09:04:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.098989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.099025 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.099034 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.099058 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.099072 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.146070 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:27 crc kubenswrapper[4693]: E1122 09:04:27.146443 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.146637 4693 scope.go:117] "RemoveContainer" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2" Nov 22 09:04:27 crc kubenswrapper[4693]: E1122 09:04:27.146830 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.200951 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.200984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.200992 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.201008 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.201019 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady 
message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.302689 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.302906 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.303135 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.303326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.303433 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.405381 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.405523 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.405616 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.405691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.405786 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.507522 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.507555 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.507563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.507578 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.507586 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.609294 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.609327 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.609336 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.609349 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.609359 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.710894 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.710928 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.710937 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.710949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.710957 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.812934 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.812967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.812975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.812986 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.812994 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.914601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.914627 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.914634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.914648 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:27 crc kubenswrapper[4693]: I1122 09:04:27.914657 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:27Z","lastTransitionTime":"2025-11-22T09:04:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.016796 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.016943 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.017125 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.017296 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.017374 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.119346 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.119380 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.119389 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.119405 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.119414 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.145918 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.145950 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:28 crc kubenswrapper[4693]: E1122 09:04:28.146003 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.146028 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:28 crc kubenswrapper[4693]: E1122 09:04:28.146129 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:28 crc kubenswrapper[4693]: E1122 09:04:28.146160 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.223436 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.223463 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.223474 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.223493 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.223504 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.324989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.325013 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.325022 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.325036 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.325043 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.417664 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/0.log"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.417710 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7" containerID="b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663" exitCode=1
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.417736 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerDied","Data":"b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.418076 4693 scope.go:117] "RemoveContainer" containerID="b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.426472 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.426501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.426510 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.426523 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.426534 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.429512 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.439083 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-
api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.453618 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff47
7fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.461167 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.469685 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.479424 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.487002 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.494975 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.506424 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.515776 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.524029 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.528126 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.528225 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.528296 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.528361 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.528479 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.531189 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.542806 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.551628 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.560309 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.567815 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.575868 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:28Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:28 crc 
kubenswrapper[4693]: I1122 09:04:28.630161 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.630188 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.630197 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.630207 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.630216 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.731932 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.731973 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.731981 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.731994 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.732004 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.833514 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.833575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.833583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.833597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.833606 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.935278 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.935380 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.935437 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.935501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:28 crc kubenswrapper[4693]: I1122 09:04:28.935555 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:28Z","lastTransitionTime":"2025-11-22T09:04:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.037576 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.037615 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.037636 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.037646 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.037653 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.138785 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.138960 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.139035 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.139113 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.139173 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.146137 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:29 crc kubenswrapper[4693]: E1122 09:04:29.146220 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.240700 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.240724 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.240733 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.240743 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.240749 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.341978 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.342012 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.342020 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.342032 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.342041 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.421748 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/0.log" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.421792 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerStarted","Data":"6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.431962 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.439039 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.444115 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.444139 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.444148 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.444160 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.444167 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.447817 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? 
still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.459548 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.466920 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.473925 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.482210 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.490809 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.501611 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae3
4a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.514186 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff47
7fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.522205 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c
97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.530462 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.538772 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.545862 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.545904 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.545916 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.545935 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.545948 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.550321 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.557527 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api
-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.565607 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042
984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.576281 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.647771 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.647802 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.647812 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.647827 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:29 crc kubenswrapper[4693]: I1122 09:04:29.647838 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:29Z","lastTransitionTime":"2025-11-22T09:04:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.056041 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.056086 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.056102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.056118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.056128 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.145774 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.145810 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.145926 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:04:30 crc kubenswrapper[4693]: E1122 09:04:30.145997 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:04:30 crc kubenswrapper[4693]: E1122 09:04:30.145922 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:30 crc kubenswrapper[4693]: E1122 09:04:30.146070 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
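Every NetworkReady=false record above traces back to the same test: the container runtime reports the network as not ready until a CNI configuration file appears in the directory named in the message, /etc/kubernetes/cni/net.d/. A minimal Go sketch of that kind of probe follows; it is an illustration of the check's shape, not the kubelet's or CRI-O's actual code, and the accepted file extensions are an assumption based on CNI convention.

// cnicheck.go - minimal sketch of a "is there any CNI config yet?" probe.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	confDir := "/etc/kubernetes/cni/net.d" // directory quoted in the log message
	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Printf("NetworkReady=false: cannot read %s: %v\n", confDir, err)
		return
	}
	for _, e := range entries {
		// Extensions conventionally accepted by CNI config loaders (assumption).
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Printf("NetworkReady=true: found %s\n", e.Name())
			return
		}
	}
	fmt.Println("NetworkReady=false: no CNI configuration file found")
}

Once the network provider (here, OVN-Kubernetes) writes its config into that directory, a probe like this starts succeeding and the kubelet can flip the Ready condition back to True.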
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.158435 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.158471 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.158483 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.158498 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.158516 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.260431 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.260497 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.260507 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.260517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.260526 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.362233 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.362267 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.362276 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.362290 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.362300 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.464141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.464255 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.464271 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.464299 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.464319 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.566286 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.566333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.566342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.566358 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.566369 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.668645 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.668712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.668724 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.668747 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.668762 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.770769 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.770807 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.770815 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.770831 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.770861 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.872800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.872872 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.872886 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.872903 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.872917 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.977342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.977400 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.977413 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.977433 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:30 crc kubenswrapper[4693]: I1122 09:04:30.977446 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:30Z","lastTransitionTime":"2025-11-22T09:04:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.079258 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.079293 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.079301 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.079314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.079326 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.146366 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:31 crc kubenswrapper[4693]: E1122 09:04:31.146538 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
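The earlier status-patch failures report "x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:29Z is after 2025-08-24T17:21:41Z": the webhook's serving certificate aged out while the cluster was powered off. That error is an ordinary validity-window comparison, which Go's TLS stack performs during the handshake. A minimal sketch of the same check against a certificate on disk; the serving-cert.pem path is hypothetical, not taken from the log.

// certcheck.go - minimal sketch of the x509 validity-window test.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("serving-cert.pem") // hypothetical path
	if err != nil {
		fmt.Println("read:", err)
		return
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		fmt.Println("no PEM block found")
		return
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	now := time.Now()
	// Same window test that produces the "expired or is not yet valid" error.
	if now.Before(cert.NotBefore) || now.After(cert.NotAfter) {
		fmt.Printf("certificate has expired or is not yet valid: current time %s is after %s\n",
			now.UTC().Format(time.RFC3339), cert.NotAfter.UTC().Format(time.RFC3339))
		return
	}
	fmt.Println("certificate valid until", cert.NotAfter.UTC().Format(time.RFC3339))
}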
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.181240 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.181284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.181292 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.181306 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.181316 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.283496 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.283534 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.283542 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.283559 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.283568 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.385450 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.385485 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.385493 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.385507 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.385517 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.488037 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.488118 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.488131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.488160 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.488177 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.589738 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.589780 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.589793 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.589808 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.589819 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.691474 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.691509 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.691517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.691532 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.691540 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.793611 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.793674 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.793683 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.793702 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.793716 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.896065 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.896108 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.896117 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.896131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.896141 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.998735 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.998770 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.998779 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.998793 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:31 crc kubenswrapper[4693]: I1122 09:04:31.998801 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:31Z","lastTransitionTime":"2025-11-22T09:04:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.100686 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.100717 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.100726 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.100738 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.100746 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:32Z","lastTransitionTime":"2025-11-22T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.146289 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.146319 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:32 crc kubenswrapper[4693]: E1122 09:04:32.146408 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.146420 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:32 crc kubenswrapper[4693]: E1122 09:04:32.146622 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:32 crc kubenswrapper[4693]: E1122 09:04:32.146654 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.202502 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.202522 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.202529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.202539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.202547 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:32Z","lastTransitionTime":"2025-11-22T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.305252 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.305282 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.305291 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.305303 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:32 crc kubenswrapper[4693]: I1122 09:04:32.305310 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:32Z","lastTransitionTime":"2025-11-22T09:04:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
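Records like the ones above recur every ~100 ms with only the timestamp changing, so the log summarizes well into counts. A hypothetical triage helper (not part of any OpenShift tooling), assuming the kubelet.log artifact this section is extracted from is available locally:

// triage.go - hypothetical helper: tally Ready-condition flaps and distinct
// pod sync errors in a kubelet log like the one above.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("kubelet.log") // the artifact this section comes from
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	notReady := 0
	syncErrors := map[string]int{} // message+pod -> occurrence count
	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // status-patch records are very long
	for sc.Scan() {
		line := sc.Text()
		if strings.Contains(line, `"Node became not ready"`) {
			notReady++
		}
		if i := strings.Index(line, `"Error syncing pod, skipping" err=`); i >= 0 {
			syncErrors[line[i:]]++
		}
	}
	fmt.Printf("Node became not ready records: %d\n", notReady)
	for msg, n := range syncErrors {
		fmt.Printf("%4dx %.100s\n", n, msg) // print each distinct error, truncated
	}
}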
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.018897 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.019022 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.019119 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.019190 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.019246 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:33Z","lastTransitionTime":"2025-11-22T09:04:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:33 crc kubenswrapper[4693]: I1122 09:04:33.145884 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:33 crc kubenswrapper[4693]: E1122 09:04:33.146198 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.040008 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.040068 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.040087 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.040111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.040137 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.146993 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.147293 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:04:34 crc kubenswrapper[4693]: E1122 09:04:34.147283 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.147365 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:34 crc kubenswrapper[4693]: E1122 09:04:34.147433 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:34 crc kubenswrapper[4693]: E1122 09:04:34.147593 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.160577 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restart
Count\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" 
enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.170917 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.181079 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.191273 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.199960 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.208194 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.215485 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.230707 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc 
kubenswrapper[4693]: I1122 09:04:34.241189 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.243676 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.243702 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.243712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.243728 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.243767 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.250262 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.264348 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kuber
netes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image
\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.281014 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.304110 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.319543 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.328961 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.338054 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.345200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.345248 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.345257 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.345273 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.345283 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.348196 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:34Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.447049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.447081 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.447090 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.447104 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.447115 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.549088 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.549126 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.549137 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.549151 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.549161 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.651131 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.651166 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.651175 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.651188 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.651197 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.753299 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.753339 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.753349 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.753366 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.753376 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.855194 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.855227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.855237 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.855253 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.855262 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.957491 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.957529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.957539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.957555 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:34 crc kubenswrapper[4693]: I1122 09:04:34.957564 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:34Z","lastTransitionTime":"2025-11-22T09:04:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.059166 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.059200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.059209 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.059224 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.059234 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.145815 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.145927 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.161408 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.161430 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.161440 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.161452 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.161460 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.263263 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.263297 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.263311 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.263326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.263335 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.365575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.365608 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.365618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.365631 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.365640 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.467603 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.467660 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.467672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.467688 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.467697 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.569075 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.569112 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.569124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.569148 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.569159 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.670760 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.670800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.670814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.670834 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.670891 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.773324 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.773414 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.773428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.773444 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.773463 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.875802 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.875863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.875875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.875893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.875902 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.888612 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.888662 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.888674 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.888685 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.888695 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.899696 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.902501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.902529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.902540 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.902570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.902578 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.912591 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.916752 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.916778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.916787 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.916802 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.916811 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.925164 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.927525 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.927555 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.927566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.927579 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.927590 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.936159 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.938754 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.938779 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.938787 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.938801 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.938812 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.947158 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:35Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:35 crc kubenswrapper[4693]: E1122 09:04:35.947260 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.977163 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.977211 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.977221 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.977233 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:35 crc kubenswrapper[4693]: I1122 09:04:35.977242 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:35Z","lastTransitionTime":"2025-11-22T09:04:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.078864 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.078894 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.078907 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.078918 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.078942 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:36Z","lastTransitionTime":"2025-11-22T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.146007 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.146041 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:36 crc kubenswrapper[4693]: E1122 09:04:36.146123 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.146200 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:36 crc kubenswrapper[4693]: E1122 09:04:36.146226 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:36 crc kubenswrapper[4693]: E1122 09:04:36.146349 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.180836 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.180882 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.180891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.180902 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.180912 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:36Z","lastTransitionTime":"2025-11-22T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.283321 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.283359 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.283371 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.283388 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.283398 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:36Z","lastTransitionTime":"2025-11-22T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.995005 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.995032 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.995040 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.995052 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:36 crc kubenswrapper[4693]: I1122 09:04:36.995060 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:36Z","lastTransitionTime":"2025-11-22T09:04:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.096977 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.097012 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.097020 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.097033 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.097041 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:37Z","lastTransitionTime":"2025-11-22T09:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.146482 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:37 crc kubenswrapper[4693]: E1122 09:04:37.146582 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.198566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.198594 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.198603 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.198616 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.198624 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:37Z","lastTransitionTime":"2025-11-22T09:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.300273 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.300305 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.300314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.300326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:37 crc kubenswrapper[4693]: I1122 09:04:37.300334 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:37Z","lastTransitionTime":"2025-11-22T09:04:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.013753 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.013790 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.013799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.013815 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.013823 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.115902 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.115928 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.115939 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.115949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.115956 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.146429 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.146475 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.146437 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:38 crc kubenswrapper[4693]: E1122 09:04:38.146558 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:38 crc kubenswrapper[4693]: E1122 09:04:38.146660 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:38 crc kubenswrapper[4693]: E1122 09:04:38.146736 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.147448 4693 scope.go:117] "RemoveContainer" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.217739 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.217774 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.217784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.217799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.217810 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.452023 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/2.log" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.454090 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.454502 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.463063 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f06
1355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.469973 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.481674 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.488350 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.496343 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.504409 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.511809 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.520113 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.522991 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.523024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.523033 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.523048 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.523058 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.530235 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\
":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d
0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Runnin
g\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.538920 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.
io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.546510 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.553531 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.562409 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.570336 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.578103 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.584822 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.591645 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:38 crc 
kubenswrapper[4693]: I1122 09:04:38.625388    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.625417    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.625445    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.625455    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.625463    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.730248    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.730291    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.730301    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.730315    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.730324    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.832118    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.832164    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.832175    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.832189    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.832199    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.934771    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.934802    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.934812    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.934824    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:38 crc kubenswrapper[4693]: I1122 09:04:38.934833    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:38Z","lastTransitionTime":"2025-11-22T09:04:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.036621    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.036652    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.036661    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.036670    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.036677    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.138059    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.138090    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.138100    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.138111    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.138122    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.146515    4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:04:39 crc kubenswrapper[4693]: E1122 09:04:39.146623    4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.239660    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.239689    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.239698    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.239713    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.239723    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.341914    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.341956    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.341967    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.341982    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.341990    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.443700    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.443736    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.443744    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.443757    4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.443765    4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.457790    4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/3.log"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.458319    4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/2.log"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.460430    4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" exitCode=1
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.460462    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"}
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.460499    4693 scope.go:117] "RemoveContainer" containerID="e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.461157    4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"
Nov 22 09:04:39 crc kubenswrapper[4693]: E1122 09:04:39.461892    4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b"
Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.470234    4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.478466 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.485851 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.493590 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.502984 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.509731 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.518158 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.526406 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.532689 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.540623 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.545729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.545757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.545766 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.545778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.545786 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.548013 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.555442 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.561466 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.567592 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.574812 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.582343 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.593996 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca
1ef84fbaf6af5e9bb92e4242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e5559688643e61373e63d9b021686bc2c26cff477fff42bc71c668fa661704e2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:13Z\\\",\\\"message\\\":\\\"1 6410 model_client.go:382] Update operations generated as: [{Op:update Table:Logical_Switch_Port Row:map[addresses:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]} options:{GoMap:map[iface-id-ver:3b6479f0-333b-4a96-9adf-2099afdc2447 requested-chassis:crc]} port_security:{GoSet:[0a:58:0a:d9:00:04 10.217.0.4]}] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {61897e97-c771-4738-8709-09636387cb00}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748355 6410 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1122 09:04:13.748101 6410 obj_retry.go:386] Retry successful for *v1.Pod openshift-network-node-identity/network-node-identity-vrzqb after 0 failed attempt(s)\\\\nI1122 09:04:13.748398 6410 default_network_controller.go:776] Recording success event on pod openshift-network-node-identity/network-node-identity-vrzqb\\\\nF1122 09:04:13.748151 6410 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:13Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:38Z\\\",\\\"message\\\":\\\"4:38.744424 6764 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1122 09:04:38.743836 6764 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}\\\\nF1122 09:04:38.744539 6764 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook 
\\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z]\\\\nI1122 09:04:38.744556 6764 services_controller.go:360] Finished syncin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":tr
ue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:39Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.647526 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.647546 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.647554 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.647566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.647574 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.748672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.748700 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.748709 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.748719 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.748726 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.850863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.850910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.850921 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.850935 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.850944 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.952566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.952625 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.952634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.952649 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:39 crc kubenswrapper[4693]: I1122 09:04:39.952681 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:39Z","lastTransitionTime":"2025-11-22T09:04:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.054152 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.054200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.054210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.054227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.054237 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.146085 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.146133 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.146158 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:40 crc kubenswrapper[4693]: E1122 09:04:40.146244 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:40 crc kubenswrapper[4693]: E1122 09:04:40.146347 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:40 crc kubenswrapper[4693]: E1122 09:04:40.146441 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.155973 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.156001 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.156008 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.156018 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.156025 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.258473 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.258508 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.258519 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.258533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.258544 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.360793 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.360823 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.360832 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.360857 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.360866 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.462293 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.462337 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.462346 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.462361 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.462371 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.463873 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/3.log" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.466485 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:04:40 crc kubenswrapper[4693]: E1122 09:04:40.466665 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.474749 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kuberne
tes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.482119 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.494055 4693 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0
-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\
\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:38Z\\\",\\\"message\\\":\\\"4:38.744424 6764 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1122 09:04:38.743836 6764 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}\\\\nF1122 09:04:38.744539 6764 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z]\\\\nI1122 09:04:38.744556 6764 services_controller.go:360] Finished syncin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.501440 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-ar
t-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.509393 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.516542 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.524393 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.532727 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.539904 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.548582 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.556253 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.562276 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.564643 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.564681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.564690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.564705 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.564724 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.571487 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z"
Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.578916 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z"
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.586499 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.593242 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.599560 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:40Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:40 crc 
kubenswrapper[4693]: I1122 09:04:40.666432 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.666475 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.666484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.666501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.666510 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.768428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.768465 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.768477 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.768492 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.768503 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.870272 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.870314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.870325 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.870339 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.870349 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.972092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.972133 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.972141 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.972155 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:40 crc kubenswrapper[4693]: I1122 09:04:40.972163 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:40Z","lastTransitionTime":"2025-11-22T09:04:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.074137 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.074165 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.074183 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.074195 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.074203 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.146334 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:41 crc kubenswrapper[4693]: E1122 09:04:41.146441 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.175803 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.175830 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.175839 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.175862 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.175870 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.277967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.277996 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.278018 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.278031 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.278039 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.380064 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.380092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.380102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.380112 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.380119 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.481503 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.481530 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.481539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.481550 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.481557 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.582815 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.582870 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.582883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.582893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.582900 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.684686 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.684711 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.684719 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.684728 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.684736 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.786190 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.786219 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.786227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.786239 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.786247 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.887835 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.887881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.887891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.887901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.887908 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.989757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.989786 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.989794 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.989808 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:41 crc kubenswrapper[4693]: I1122 09:04:41.989815 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:41Z","lastTransitionTime":"2025-11-22T09:04:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.091688 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.091804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.091891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.091964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.092015 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.146005 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.146015 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.146053 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:42 crc kubenswrapper[4693]: E1122 09:04:42.146115 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:42 crc kubenswrapper[4693]: E1122 09:04:42.146253 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:42 crc kubenswrapper[4693]: E1122 09:04:42.146361 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.193510 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.193553 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.193564 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.193581 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.193592 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.295280 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.295306 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.295313 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.295323 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.295330 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.397260 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.397384 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.397448 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.397508 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.397564 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.499515 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.499555 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.499566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.499580 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.499589 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.601960 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.601995 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.602003 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.602016 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.602025 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.703552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.703588 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.703597 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.703610 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.703618 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.805463 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.805504 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.805513 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.805526 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.805536 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.906949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.906999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.907008 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.907021 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:42 crc kubenswrapper[4693]: I1122 09:04:42.907031 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:42Z","lastTransitionTime":"2025-11-22T09:04:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.009725 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.009972 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.010073 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.010139 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.010214 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.112300 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.112336 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.112344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.112358 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.112368 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.146642 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:43 crc kubenswrapper[4693]: E1122 09:04:43.146758 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.214388 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.214428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.214438 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.214489 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.214501 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.316236 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.316272 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.316280 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.316294 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.316305 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.418146 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.418183 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.418200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.418215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.418224 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.519932 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.519965 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.519975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.519990 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.519999 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.621390 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.621423 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.621431 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.621443 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.621453 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.723549 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.723578 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.723586 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.723598 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.723607 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.824901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.824930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.824939 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.824949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.824957 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.926984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.927012 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.927022 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.927031 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:43 crc kubenswrapper[4693]: I1122 09:04:43.927039 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:43Z","lastTransitionTime":"2025-11-22T09:04:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.028821 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.028876 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.028885 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.028899 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.028908 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.131422 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.131459 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.131468 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.131484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.131494 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.146732 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.146788 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:44 crc kubenswrapper[4693]: E1122 09:04:44.146833 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:44 crc kubenswrapper[4693]: E1122 09:04:44.146904 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.146788 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:44 crc kubenswrapper[4693]: E1122 09:04:44.147148 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.159550 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.168339 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.175564 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.182659 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc 
kubenswrapper[4693]: I1122 09:04:44.190016 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\
\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.197621 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveR
eadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.209187 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca
1ef84fbaf6af5e9bb92e4242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:38Z\\\",\\\"message\\\":\\\"4:38.744424 6764 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1122 09:04:38.743836 6764 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}\\\\nF1122 09:04:38.744539 6764 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z]\\\\nI1122 09:04:38.744556 6764 services_controller.go:360] Finished syncin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.218385 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"
}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt
\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"nam
e\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.229142 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.233292 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.233341 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.233351 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.233364 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.233371 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.237909 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.246183 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for 
pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.253501 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.261489 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.269543 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.278009 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.285605 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.292089 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:44Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.335523 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.335554 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.335563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.335574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.335582 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.436890 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.436941 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.436951 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.436964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.436973 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.538778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.538807 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.538816 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.538829 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.538838 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.640907 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.640942 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.640950 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.640964 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.640972 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.742658 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.742818 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.742923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.743003 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.743059 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.844993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.845108 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.845168 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.845242 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.845303 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.946883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.946914 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.946922 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.946936 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:44 crc kubenswrapper[4693]: I1122 09:04:44.946944 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:44Z","lastTransitionTime":"2025-11-22T09:04:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.048927 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.048953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.048962 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.048974 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.048982 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.145876 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:45 crc kubenswrapper[4693]: E1122 09:04:45.146196 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.150440 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.150549 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.150607 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.150672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.150726 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.252797 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.252830 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.252853 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.252872 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.252881 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.354618 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.354657 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.354666 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.354684 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.354693 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.456875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.456910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.456918 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.456933 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.456942 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.558866 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.558903 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.558913 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.558929 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.558940 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.660399 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.660514 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.660589 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.660659 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.660721 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.761898 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.761928 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.761936 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.761949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.761958 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.863727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.863763 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.863773 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.863784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.863791 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.965936 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.965966 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.965974 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.965984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:45 crc kubenswrapper[4693]: I1122 09:04:45.965991 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:45Z","lastTransitionTime":"2025-11-22T09:04:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.022677 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.022784 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:50.022766765 +0000 UTC m=+146.165269066 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.022995 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.023108 4693 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.023163 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-22 09:05:50.023153365 +0000 UTC m=+146.165655656 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.067953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.067989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.067999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.068014 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.068023 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.124458 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.124496 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.124516 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124596 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124608 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124616 4693 projected.go:194] Error preparing data for projected volume 
kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124640 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:50.124632291 +0000 UTC m=+146.267134581 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124682 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124690 4693 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124697 4693 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124715 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:50.124709036 +0000 UTC m=+146.267211326 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124742 4693 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.124759 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:50.124754672 +0000 UTC m=+146.267256963 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.146170 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.146205 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.146268 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.146340 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.146409 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.146532 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.169997 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.170023 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.170032 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.170041 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.170048 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.271459 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.271483 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.271492 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.271501 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.271508 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.334162 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.334188 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.334196 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.334207 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.334226 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.342645 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:46Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.345051 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.345072 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.345080 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.345088 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.345097 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.353492 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:46Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.355715 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.355739 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.355747 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.355757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.355764 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.364815 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:46Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.367072 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.367097 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.367106 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.367116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.367122 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.375179 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:46Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.377370 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.377394 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.377402 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.377411 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.377418 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.385067 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:46Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:46Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:46 crc kubenswrapper[4693]: E1122 09:04:46.385193 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.385957 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.385984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.385993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.386002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.386009 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.487885 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.487910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.487920 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.487932 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.487939 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.589955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.589982 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.589990 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.589999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.590007 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.691893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.691923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.691931 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.691940 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.691947 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.793478 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.793695 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.793703 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.793718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.793728 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.895798 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.895833 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.895862 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.895877 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.895887 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.997876 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.997913 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.997923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.997935 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:46 crc kubenswrapper[4693]: I1122 09:04:46.997944 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:46Z","lastTransitionTime":"2025-11-22T09:04:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.099859 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.099880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.099889 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.099901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.099910 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.146543 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:47 crc kubenswrapper[4693]: E1122 09:04:47.146640 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.201725 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.201763 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.201772 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.201805 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.201815 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.304080 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.304129 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.304138 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.304151 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.304159 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.405746 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.405770 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.405778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.405788 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.405795 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.507814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.507868 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.507881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.507895 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.507904 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.610078 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.610106 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.610114 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.610127 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.610136 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.712140 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.712176 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.712185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.712199 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.712209 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.814049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.814083 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.814092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.814105 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.814113 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.915721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.915767 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.915778 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.915795 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:47 crc kubenswrapper[4693]: I1122 09:04:47.915805 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:47Z","lastTransitionTime":"2025-11-22T09:04:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.017372 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.017411 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.017420 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.017432 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.017441 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.118785 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.118807 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.118816 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.118828 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.118835 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.145995 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.146090 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:48 crc kubenswrapper[4693]: E1122 09:04:48.146186 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.146217 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:48 crc kubenswrapper[4693]: E1122 09:04:48.146344 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:48 crc kubenswrapper[4693]: E1122 09:04:48.146398 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.220466 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.220498 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.220507 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.220520 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.220528 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.322383 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.322413 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.322422 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.322435 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.322444 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.423945 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.423977 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.423986 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.423997 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.424004 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.525968 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.526003 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.526013 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.526025 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.526034 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.627694 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.627740 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.627750 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.627770 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.627781 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.729370 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.729400 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.729409 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.729421 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.729428 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.831319 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.831367 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.831376 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.831389 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.831398 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.932868 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.932898 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.932910 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.932923 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:48 crc kubenswrapper[4693]: I1122 09:04:48.932931 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:48Z","lastTransitionTime":"2025-11-22T09:04:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.034978 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.035014 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.035022 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.035041 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.035049 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.136966 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.137019 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.137028 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.137049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.137062 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.146686 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:49 crc kubenswrapper[4693]: E1122 09:04:49.146836 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.239500 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.239539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.239547 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.239602 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.239620 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.341631 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.341663 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.341673 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.341685 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.341695 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.443612 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.443663 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.443678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.443696 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.443711 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.545808 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.545856 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.545868 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.545878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.545885 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.648027 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.648065 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.648074 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.648089 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.648098 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.750162 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.750201 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.750210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.750227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.750236 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.852420 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.852459 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.852467 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.852482 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.852491 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.953926 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.953958 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.953967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.953979 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:49 crc kubenswrapper[4693]: I1122 09:04:49.953988 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:49Z","lastTransitionTime":"2025-11-22T09:04:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.055890 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.055928 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.055938 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.055953 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.055963 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.146102 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:50 crc kubenswrapper[4693]: E1122 09:04:50.146360 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.146387 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.146423 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:50 crc kubenswrapper[4693]: E1122 09:04:50.146682 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:50 crc kubenswrapper[4693]: E1122 09:04:50.146781 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
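The cycle above repeats because of a single condition: the container runtime keeps reporting NetworkReady=false until a CNI configuration file appears in /etc/kubernetes/cni/net.d/. As a rough illustration of what that readiness check amounts to (a sketch of the standard CNI file-naming convention the message refers to, not the actual CRI-O or kubelet code):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// hasCNIConfig reports whether dir contains any CNI network
// configuration. By convention the runtime looks for *.conf,
// *.conflist, or *.json files; until one exists it stays
// NetworkReady=false, which is exactly the state logged above.
func hasCNIConfig(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ok, err := hasCNIConfig("/etc/kubernetes/cni/net.d")
	if err != nil || !ok {
		fmt.Println("NetworkReady=false reason:NetworkPluginNotReady")
		return
	}
	fmt.Println("NetworkReady=true")
}

Once the network plugin (here, OVN-Kubernetes) writes its configuration into that directory, the check flips and the NodeNotReady churn that continues below stops.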
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.154166 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.157275 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.157307 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.157316 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.157330 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.157341 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.259705 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.259736 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.259745 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.259777 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.259785 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.361215 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.361241 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.361259 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.361270 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.361278 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.462273 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.462300 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.462308 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.462318 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.462327 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.563635 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.563670 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.563679 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.563693 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.563703 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.665558 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.665710 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.665779 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.665869 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.665946 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.767190 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.767211 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.767218 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.767228 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.767235 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.869205 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.869231 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.869241 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.869257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.869265 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.970634 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.970660 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.970668 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.970676 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:50 crc kubenswrapper[4693]: I1122 09:04:50.970699 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:50Z","lastTransitionTime":"2025-11-22T09:04:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.072004 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.072046 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.072056 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.072076 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.072087 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.146511 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:51 crc kubenswrapper[4693]: E1122 09:04:51.146610 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
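Every "Node became not ready" entry from setters.go carries the same condition payload, re-serialized on each status sync. A minimal Go sketch of how a condition object of that shape is built and marshaled, using a hand-rolled struct in place of the real k8s.io/api NodeCondition type:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// nodeCondition mirrors the JSON keys of the condition={...} payload
// logged above; it is a stand-in, not the upstream API type.
type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339) // e.g. 2025-11-22T09:04:50Z
	c := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message:            "container runtime network not ready: NetworkReady=false",
	}
	b, _ := json.Marshal(c)
	fmt.Println(string(b)) // same shape as the condition={...} entries above
}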
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.147188 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:04:51 crc kubenswrapper[4693]: E1122 09:04:51.147328 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.174199 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.174228 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.174236 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.174273 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.174281 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.276344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.276380 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.276389 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.276402 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.276411 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.377814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.377879 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.377889 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.377901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.377910 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.479682 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.479733 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.479743 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.479753 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.479760 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.581277 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.581324 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.581333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.581348 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.581357 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.682817 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.682878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.682888 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.682901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.682909 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.784758 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.784788 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.784799 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.784811 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.784819 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.887047 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.887070 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.887079 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.887088 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.887095 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.988811 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.988869 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.988878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.988894 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:51 crc kubenswrapper[4693]: I1122 09:04:51.988903 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:51Z","lastTransitionTime":"2025-11-22T09:04:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.090678 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.090721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.090730 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.090748 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.090757 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.146482 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.146537 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:52 crc kubenswrapper[4693]: E1122 09:04:52.146612 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.146655 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:52 crc kubenswrapper[4693]: E1122 09:04:52.146788 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:52 crc kubenswrapper[4693]: E1122 09:04:52.146899 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.192527 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.192564 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.192575 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.192590 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.192599 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.296300 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.296335 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.296344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.296357 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.296368 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.398295 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.398327 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.398335 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.398348 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.398356 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.500681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.500707 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.500714 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.500727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.500737 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.603228 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.603365 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.603507 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.603651 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.603744 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.705827 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.705878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.705889 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.705901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.705911 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.807971 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.808024 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.808034 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.808049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.808057 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.909740 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.909767 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.909775 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.909785 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:52 crc kubenswrapper[4693]: I1122 09:04:52.909792 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:52Z","lastTransitionTime":"2025-11-22T09:04:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.011247 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.011304 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.011313 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.011329 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.011341 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.113369 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.113396 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.113404 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.113417 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.113426 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.146420 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:53 crc kubenswrapper[4693]: E1122 09:04:53.146515 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
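The "back-off 40s restarting failed container=ovnkube-controller" entry at 09:04:51 reflects the kubelet's CrashLoopBackOff schedule: restart delays start at 10s, double after each consecutive failure, and are capped at 5 minutes, so 40s corresponds to the third failed restart. A sketch of that schedule (the documented policy, not the kubelet's own implementation):

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay returns the restart delay after n consecutive
// container failures under the kubelet's back-off policy:
// 10s base, doubling, capped at 5 minutes.
func crashLoopDelay(n int) time.Duration {
	d := 10 * time.Second
	for i := 1; i < n; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	for n := 1; n <= 6; n++ {
		fmt.Printf("failure %d -> back-off %s\n", n, crashLoopDelay(n))
	}
}

Running it prints 10s, 20s, 40s, 1m20s, 2m40s, 5m0s; the 40s seen above places ovnkube-controller at its third restart attempt.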
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.214766 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.214795 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.214804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.214814 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.214821 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.316976 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.317007 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.317017 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.317027 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.317036 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.418972 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.419025 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.419035 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.419050 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.419059 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.520695 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.520729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.520740 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.520754 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.520763 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.621879 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.621909 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.621919 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.621931 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.621938 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.723829 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.723875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.723887 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.723896 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.723904 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.825068 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.825097 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.825105 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.825116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.825124 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.926672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.926703 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.926712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.926721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:53 crc kubenswrapper[4693]: I1122 09:04:53.926728 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:53Z","lastTransitionTime":"2025-11-22T09:04:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.028210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.028232 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.028241 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.028249 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.028256 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.129831 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.129880 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.129889 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.129899 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.129906 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.146362 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:54 crc kubenswrapper[4693]: E1122 09:04:54.146445 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.146563 4693 util.go:30] "No sandbox for pod can be found. 
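The recurring util.go:30 "No sandbox for pod can be found" lines mark the decision point where the kubelet concludes a fresh pod sandbox must be created before any containers can start. A simplified sketch of that check, with a hypothetical sandbox record standing in for the CRI status (the real comparison also weighs network and namespace configuration):

package main

import "fmt"

// sandbox is a minimal, hypothetical stand-in for a CRI pod
// sandbox status record.
type sandbox struct {
	id    string
	ready bool
}

// needNewSandbox returns true when no usable sandbox exists for the
// pod, i.e. the situation the log line above reports.
func needNewSandbox(existing []sandbox) bool {
	for _, s := range existing {
		if s.ready {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(needNewSandbox(nil))                                // true: need to start a new one
	fmt.Println(needNewSandbox([]sandbox{{id: "a1", ready: true}})) // false: reuse existing
}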
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:54 crc kubenswrapper[4693]: E1122 09:04:54.146815 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.147252 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:54 crc kubenswrapper[4693]: E1122 09:04:54.147574 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.155103 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7978824f-5c1f-4d92-9aaa-c93059d847f0\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6622d62fffcf75abc0135291ed177bb6626a5ab62516d486cb8a57c2c33f71b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b704bd1eb0c9f3e5d1776ccbb017e84dac77fe401c86c21f1943eded19ad5528\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/opens
hift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b704bd1eb0c9f3e5d1776ccbb017e84dac77fe401c86c21f1943eded19ad5528\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.163094 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.169421 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-qsptv" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5f914d43-b154-48b4-9a79-971e20908551\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://201f0dfda7e1d5ac794c09e3be45439b8e5df65ec6d1bdd1b52b1e24c9fedb62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-qlj26\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-qsptv\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.177196 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-2s9rh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:28Z\\\",\\\"message\\\":\\\"2025-11-22T09:03:43+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58\\\\n2025-11-22T09:03:43+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_e4949b5e-c37a-48b6-89af-6e35513a5b58 to /host/opt/cni/bin/\\\\n2025-11-22T09:03:43Z [verbose] multus-daemon started\\\\n2025-11-22T09:03:43Z [verbose] Readiness Indicator file check\\\\n2025-11-22T09:04:28Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:04:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cx6z2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-2s9rh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.186169 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f125af82-734e-4cd7-9324-f910da2ee679\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:52Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e52fdcdb46539215a208b7aefee1bed566b38b90f9b7e7c764335da2aa52e511\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9806af76c3c45b579b4b9b5a4cae58b9c5b7dbacd6ab5ad086b3044ecb976e23\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0e0877aa298bae71dc1157b0e7875ca8de81bac7e3e19cf69e39adcd4a296b0c\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://308a65b64597f3714a5f12adeea440621ff1a65ec39add8c205b8eb163d348d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cfb62196d71f0f52b6afba3f0f2dc1556afde834ea41b78a7a5e7e13ac5924f1\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-22T09:03:41Z\\\",\\\"message\\\":\\\"heck-endpoints-signer@1763802206\\\\\\\\\\\\\\\" (2025-11-22 09:03:25 +0000 UTC to 2025-12-22 09:03:26 +0000 UTC (now=2025-11-22 09:03:41.496938109 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497524 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497554 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::requestheader-client-ca-file\\\\nI1122 09:03:41.497627 1 named_certificates.go:53] \\\\\\\"Loaded SNI cert\\\\\\\" index=0 certName=\\\\\\\"self-signed loopback\\\\\\\" certDetail=\\\\\\\"\\\\\\\\\\\\\\\"apiserver-loopback-client@1763802216\\\\\\\\\\\\\\\" [serving] validServingFor=[apiserver-loopback-client] issuer=\\\\\\\\\\\\\\\"apiserver-loopback-client-ca@1763802216\\\\\\\\\\\\\\\" (2025-11-22 08:03:36 +0000 UTC to 2026-11-22 08:03:36 +0000 UTC (now=2025-11-22 09:03:41.497610484 +0000 UTC))\\\\\\\"\\\\nI1122 09:03:41.497646 1 secure_serving.go:213] Serving securely on [::]:17697\\\\nI1122 09:03:41.497662 1 genericapiserver.go:683] [graceful-termination] waiting for shutdown to be initiated\\\\nI1122 09:03:41.497493 1 requestheader_controller.go:172] Starting RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497674 1 shared_informer.go:313] Waiting for caches to sync for RequestHeaderAuthRequestController\\\\nI1122 09:03:41.497678 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"WatchListClient\\\\\\\" enabled=false\\\\nI1122 09:03:41.497691 1 tlsconfig.go:243] \\\\\\\"Starting DynamicServingCertificateController\\\\\\\"\\\\nI1122 09:03:41.497699 1 envvar.go:172] \\\\\\\"Feature gate default state\\\\\\\" feature=\\\\\\\"InformerResourceVersion\\\\\\\" enabled=false\\\\nI1122 09:03:41.497510 1 configmap_cafile_content.go:205] \\\\\\\"Starting controller\\\\\\\" name=\\\\\\\"client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\\\\"\\\\nI1122 09:03:41.497869 1 shared_informer.go:313] Waiting for caches to sync for client-ca::kube-system::extension-apiserver-authentication::client-ca-file\\\\nF1122 09:03:41.498520 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dbfce4580f66f11074d0ffd4abd1a641d88a20889e196c9f36bf69fe13e36e70\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://12bf92fef4a46043749c70a06f1d93879aa63101b6b82279d35fcfcb7078c24a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.193563 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.199942 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-j6drd" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2b387eae-f6b6-42ec-9736-176d13068eea\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b339d7e5345d8de4158873935fb33a9cf6753e86b60839a4d659d482661614a9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ng2gg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:48Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-j6drd\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.206423 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-t4blm" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-dp9tg\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:55Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-t4blm\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.213952 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.221907 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"651f9572-e238-4830-96ff-e1a7af32fb35\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:19Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79a63d08db4a34c9868693527ca78ed08c85f9b919a84326566c1f4bef97741e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f1423457d5549656e802ff33852224ceeec60a3ca00f172ea5362cb619ae6bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a938
0066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://57d7d79463b869286ba1d2743f6228666f061355155bcf23dba5cce5e327803e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://051c096a0d43742340d6ca4778de344bf7084738e07d923ea6ce8761a19a3d23\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:25Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.232950 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.232975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.232983 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.232995 4693 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.233002 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.238944 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7007d901-fc52-4723-a949-db71619b3305\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://51f2e2700a28a15df49657e01e04c6606f3fa52235850609480c82794ced5c77\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-pzbdm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"ho
stIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-scx6r\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.251301 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2fa68d41-61c5-4781-8984-add9804c1b4b\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca
1ef84fbaf6af5e9bb92e4242\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-22T09:04:38Z\\\",\\\"message\\\":\\\"4:38.744424 6764 base_network_controller_pods.go:477] [default/openshift-network-diagnostics/network-check-source-55646444c4-trplf] creating logical port openshift-network-diagnostics_network-check-source-55646444c4-trplf for pod on switch crc\\\\nI1122 09:04:38.743836 6764 loadbalancer.go:304] Deleted 0 stale LBs for map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-machine-api/cluster-autoscaler-operator\\\\\\\"}\\\\nF1122 09:04:38.744539 6764 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/node?timeout=10s\\\\\\\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:38Z is after 2025-08-24T17:21:41Z]\\\\nI1122 09:04:38.744556 6764 services_controller.go:360] Finished syncin\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-22T09:04:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-p2ndl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-852ps\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.259395 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4a3c7aa3fbc1a4c51755990955732ee0cec55f234c653aedd02e936e23d9bdbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.266601 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://253ea64345db21edaa0530653a7c13db4c25ad77575a59040a7a47bce63cb043\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.274821 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f69fa81cad0bd662eae9d5316edff488c65bb3a5db0348518a695aa6efbccba2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://6ba096da57e8ba191d1ea7aa3d80d8d435920177ffaef88c2a7da9dfc66ff5c9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.283567 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"bd8b5bd3-66f1-4495-babd-04ae3e5cea6f\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e58977765a2d337af05250a48b261e0c2ea0e3f1e2c5abbe26d57cdd3c37a5fc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://33d57fc447d32a21be99e1e6b64d34aa68d725e2d7e9f034d7a0be4a84402d08\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:42Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9f897cfaf8196375f672fd50c71f16c66a684ba9881a8103a7520458cda1f27b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:43Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:43Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f156f26708cd45b20153ec4839c559e2bfb7e49a8ba4bd3225e7d34d4237f8e6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:44Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://427510d1c2f995466e600bcb55598e4a889c0eb50da6f23f4ae901f76634d2fe\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:45Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:45Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f40b25257749dc4aed683dce47318b2897ce9fe6a4ec3a69ba81f0a6ac57ff93\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:46Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:46Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://867bccf47cc9b503f1a4fab8666f2875ee804fff39ce5bcae216f354174efd2c\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-22T09:03:47Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-22T09:03:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-lzt7h\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:42Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-5rjtn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.290700 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"01958f13-f4ba-4d92-bd7e-33fa3378f029\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:54Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:55Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b7f1e78cbbff60fcc4bde60019d398eb17d998ab9988a28dc48f0617e8c0b95f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://faf0759fdd4c0ed4e9b58fc602e320c4dc026af6b6f0ff2fb6f0159ce9e42a59\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jc45w\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:54Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sxbhn\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 
09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.298470 4693 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"75970761-8c18-4dbc-9116-b1f7b092fcbb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:26Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:44Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-22T09:03:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8e43db27b62aff4c4199408a9c196bce2702edbcf35030c11fb211abf06aad20\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cc42c084706c403f8099b681d7ae22a7dee047b679de933596cb506924260aa2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:24Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://58b878a0696635f58a7aae7b7484b4e6b093ebfdb1d042984e5a427b9bcd744f\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\
",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b75777c6c3d16188ee9c287e860642e1810c36f9a1df2368efe1e41c983ceb1\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-22T09:03:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-22T09:03:24Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:54Z is after 2025-08-24T17:21:41Z" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.335954 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.335989 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.335999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.336015 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.336025 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.437679 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.437712 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.437721 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.437736 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.437753 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.539462 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.539503 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.539514 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.539531 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.539540 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.641681 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.641709 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.641718 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.641727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.641735 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.743658 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.743690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.743698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.743710 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.743717 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.845665 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.845698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.845708 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.845722 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.845731 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.947659 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.947691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.947700 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.947711 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:54 crc kubenswrapper[4693]: I1122 09:04:54.947718 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:54Z","lastTransitionTime":"2025-11-22T09:04:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.049513 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.049543 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.049552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.049563 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.049571 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.145931 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:55 crc kubenswrapper[4693]: E1122 09:04:55.146036 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.151124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.151149 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.151157 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.151167 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.151174 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.252411 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.252434 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.252443 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.252454 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.252462 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.354539 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.354577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.354587 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.354601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.354612 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.456517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.456549 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.456559 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.456570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.456578 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.558329 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.558363 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.558373 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.558385 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.558395 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.660666 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.660701 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.660711 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.660724 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.660734 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.762303 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.762333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.762343 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.762353 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.762362 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.864043 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.864092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.864106 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.864123 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.864135 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.966748 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.966780 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.966790 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.966808 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:55 crc kubenswrapper[4693]: I1122 09:04:55.966817 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:55Z","lastTransitionTime":"2025-11-22T09:04:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.068755 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.068784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.068793 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.068804 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.068811 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.145754 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.145835 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.145886 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.145932 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.146045 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.146136 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.170381 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.170418 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.170428 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.170441 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.170450 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.272517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.272546 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.272556 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.272566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.272574 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.374371 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.374552 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.374620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.374691 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.374754 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.476104 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.476144 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.476153 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.476169 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.476177 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.577899 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.577930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.577938 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.577949 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.577958 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.602878 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.602911 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.602920 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.602932 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.602940 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.611810 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:56Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.614620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.614646 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.614654 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.614664 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.614671 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.622616 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:56Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.624632 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.624662 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.624671 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.624683 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.624690 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.632468 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}], ... [images, nodeInfo, and runtimeHandlers fields identical to the 09:04:56.611810 attempt above] ...}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:56Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.634680 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.634709 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.634717 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.634727 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.634737 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.642488 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:56Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.644741 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.644763 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.644772 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.644784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.644791 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.652270 4693 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404552Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865352Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-22T09:04:56Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e67c00bb-ce74-4d2e-879d-8186ebb300ff\\\",\\\"systemUUID\\\":\\\"15f0b692-3998-4879-8228-aabd9ff9e80a\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-22T09:04:56Z is after 
2025-08-24T17:21:41Z" Nov 22 09:04:56 crc kubenswrapper[4693]: E1122 09:04:56.652393 4693 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.679184 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.679218 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.679228 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.679245 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.679255 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.781111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.781140 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.781148 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.781158 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.781166 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.882314 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.882346 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.882355 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.882365 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.882373 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.983653 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.983679 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.983687 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.983698 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:56 crc kubenswrapper[4693]: I1122 09:04:56.983704 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:56Z","lastTransitionTime":"2025-11-22T09:04:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.085650 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.085679 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.085690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.085702 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.085709 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.145711 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:57 crc kubenswrapper[4693]: E1122 09:04:57.145821 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.186821 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.186865 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.186875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.186884 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.186892 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.287967 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.287993 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.288002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.288014 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.288022 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.389461 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.389482 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.389489 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.389498 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.389505 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.491543 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.491578 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.491588 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.491601 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.491610 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.593433 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.593463 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.593474 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.593484 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.593493 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.695468 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.695498 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.695507 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.695517 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.695525 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.797225 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.797263 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.797271 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.797284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.797292 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.899494 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.899529 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.899536 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.899549 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:57 crc kubenswrapper[4693]: I1122 09:04:57.899558 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:57Z","lastTransitionTime":"2025-11-22T09:04:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.001234 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.001265 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.001274 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.001285 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.001293 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.103669 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.103713 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.103723 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.103740 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.103754 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.146175 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:04:58 crc kubenswrapper[4693]: E1122 09:04:58.146304 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.146177 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.146376 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:04:58 crc kubenswrapper[4693]: E1122 09:04:58.146466 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:04:58 crc kubenswrapper[4693]: E1122 09:04:58.146568 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.204981 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.205007 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.205018 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.205031 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.205039 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.306161 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.306194 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.306204 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.306214 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.306224 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.407608 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.407630 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.407645 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.407654 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.407660 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.509704 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.509729 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.509736 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.509746 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.509752 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.610950 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.610986 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.610994 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.611006 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.611013 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.713049 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.713082 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.713099 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.713112 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.713121 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.814787 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.814821 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.814831 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.814861 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.814872 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.916733 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.916763 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.916791 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.916801 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:58 crc kubenswrapper[4693]: I1122 09:04:58.916809 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:58Z","lastTransitionTime":"2025-11-22T09:04:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.018790 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.018815 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.018822 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.018830 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.018837 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.031563 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:59 crc kubenswrapper[4693]: E1122 09:04:59.031700 4693 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:04:59 crc kubenswrapper[4693]: E1122 09:04:59.031747 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs podName:fbc29b7c-60ea-4d65-ae31-fee4b8e7f833 nodeName:}" failed. No retries permitted until 2025-11-22 09:06:03.031735239 +0000 UTC m=+159.174237530 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs") pod "network-metrics-daemon-t4blm" (UID: "fbc29b7c-60ea-4d65-ae31-fee4b8e7f833") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.120817 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.120859 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.120868 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.120877 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.120885 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.145810 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:04:59 crc kubenswrapper[4693]: E1122 09:04:59.145914 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.222096 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.222137 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.222145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.222156 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.222164 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.323545 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.323574 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.323581 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.323590 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.323597 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.425672 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.425803 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.425917 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.425999 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.426071 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.527664 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.527714 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.527723 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.527737 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.527745 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.629854 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.629891 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.629899 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.629912 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.629921 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.731518 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.731556 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.731565 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.731577 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.731584 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.833116 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.833152 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.833161 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.833170 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.833176 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.935180 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.935211 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.935226 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.935236 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:04:59 crc kubenswrapper[4693]: I1122 09:04:59.935244 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:04:59Z","lastTransitionTime":"2025-11-22T09:04:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.037107 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.037226 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.037309 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.037406 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.037463 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.139012 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.139034 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.139042 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.139052 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.139060 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.146638 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.146665 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.146646 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:05:00 crc kubenswrapper[4693]: E1122 09:05:00.146742 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:05:00 crc kubenswrapper[4693]: E1122 09:05:00.146800 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:05:00 crc kubenswrapper[4693]: E1122 09:05:00.146901 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.240884 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.240930 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.240941 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.240951 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.240960 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.343061 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.343092 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.343101 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.343111 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.343118 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.444671 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.444800 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.444881 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.444946 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.445008 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.547057 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.547229 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.547301 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.547375 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.547441 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.649191 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.649243 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.649257 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.649275 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.649288 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.751249 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.751283 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.751292 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.751306 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.751315 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.852686 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.852835 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.853002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.853080 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.853144 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.955216 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.955243 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.955252 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.955263 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:00 crc kubenswrapper[4693]: I1122 09:05:00.955271 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:00Z","lastTransitionTime":"2025-11-22T09:05:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.056543 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.056565 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.056573 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.056583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.056591 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.146467 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:05:01 crc kubenswrapper[4693]: E1122 09:05:01.146562 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.157570 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.157602 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.157610 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.157620 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.157629 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.259098 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.259122 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.259132 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.259143 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.259149 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.360925 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.360956 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.360963 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.360974 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.360982 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.463106 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.463142 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.463153 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.463167 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.463175 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.565098 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.565128 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.565156 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.565166 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.565175 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.667029 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.667059 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.667068 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.667080 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.667088 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.768981 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.769008 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.769016 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.769026 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.769034 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.871121 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.871159 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.871167 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.871185 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.871194 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.973165 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.973200 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.973210 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.973223 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:01 crc kubenswrapper[4693]: I1122 09:05:01.973232 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:01Z","lastTransitionTime":"2025-11-22T09:05:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.075033 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.075093 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.075102 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.075113 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.075121 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.146678 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.146741 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.146926 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:05:02 crc kubenswrapper[4693]: E1122 09:05:02.146949 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:05:02 crc kubenswrapper[4693]: E1122 09:05:02.147165 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:05:02 crc kubenswrapper[4693]: E1122 09:05:02.147214 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.157487 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.176858 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.176885 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.176893 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.176904 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.176913 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.278743 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.278783 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.278792 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.278805 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.278814 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.380284 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.380320 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.380329 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.380342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.380366 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.482094 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.482128 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.482136 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.482149 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.482159 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.584028 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.584059 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.584068 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.584080 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.584089 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.685378 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.685416 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.685426 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.685436 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.685446 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.787321 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.787381 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.787390 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.787403 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.787413 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.889343 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.889388 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.889396 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.889409 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.889419 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.991344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.991396 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.991406 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.991421 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:02 crc kubenswrapper[4693]: I1122 09:05:02.991432 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:02Z","lastTransitionTime":"2025-11-22T09:05:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.093093 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.093124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.093135 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.093145 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.093152 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.146384 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:05:03 crc kubenswrapper[4693]: E1122 09:05:03.146502 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.146943 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"
Nov 22 09:05:03 crc kubenswrapper[4693]: E1122 09:05:03.147078 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.194388 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.194416 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.194424 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.194434 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.194441 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.296449 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.296486 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.296495 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.296508 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.296517 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.398650 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.398684 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.398693 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.398709 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.398718 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.500779 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.500822 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.500833 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.500863 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.500871 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.603013 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.603053 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.603063 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.603078 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.603088 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.705150 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.705186 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.705198 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.705211 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.705219 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.806942 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.806975 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.806984 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.806995 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.807004 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.909187 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.909220 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.909228 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.909239 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:03 crc kubenswrapper[4693]: I1122 09:05:03.909247 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:03Z","lastTransitionTime":"2025-11-22T09:05:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.011296 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.011333 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.011341 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.011364 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.011375 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.113055 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.113094 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.113103 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.113115 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.113124 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.146446 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.146511 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Nov 22 09:05:04 crc kubenswrapper[4693]: E1122 09:05:04.146543 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.146581 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:05:04 crc kubenswrapper[4693]: E1122 09:05:04.146757 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Nov 22 09:05:04 crc kubenswrapper[4693]: E1122 09:05:04.146832 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.168460 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-j6drd" podStartSLOduration=82.16844719 podStartE2EDuration="1m22.16844719s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.168441159 +0000 UTC m=+100.310943450" watchObservedRunningTime="2025-11-22 09:05:04.16844719 +0000 UTC m=+100.310949481"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.190736 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=45.190719193 podStartE2EDuration="45.190719193s" podCreationTimestamp="2025-11-22 09:04:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.190426119 +0000 UTC m=+100.332928410" watchObservedRunningTime="2025-11-22 09:05:04.190719193 +0000 UTC m=+100.333221484"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.197427 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podStartSLOduration=83.197409645 podStartE2EDuration="1m23.197409645s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.197057881 +0000 UTC m=+100.339560172" watchObservedRunningTime="2025-11-22 09:05:04.197409645 +0000 UTC m=+100.339911936"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.216391 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.216429 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.216439 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.216462 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.216470 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.232133 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=2.232118381 podStartE2EDuration="2.232118381s" podCreationTimestamp="2025-11-22 09:05:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.231017672 +0000 UTC m=+100.373519963" watchObservedRunningTime="2025-11-22 09:05:04.232118381 +0000 UTC m=+100.374620671"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.273266 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sxbhn" podStartSLOduration=82.273248179 podStartE2EDuration="1m22.273248179s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.273235916 +0000 UTC m=+100.415738207" watchObservedRunningTime="2025-11-22 09:05:04.273248179 +0000 UTC m=+100.415750460"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.273443 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-5rjtn" podStartSLOduration=83.273439129 podStartE2EDuration="1m23.273439129s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.266009611 +0000 UTC m=+100.408511913" watchObservedRunningTime="2025-11-22 09:05:04.273439129 +0000 UTC m=+100.415941420"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.281546 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=80.281536959 podStartE2EDuration="1m20.281536959s" podCreationTimestamp="2025-11-22 09:03:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.281469141 +0000 UTC m=+100.423971432" watchObservedRunningTime="2025-11-22 09:05:04.281536959 +0000 UTC m=+100.424039250"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.288011 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=14.288005243 podStartE2EDuration="14.288005243s" podCreationTimestamp="2025-11-22 09:04:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.287484228 +0000 UTC m=+100.429986520" watchObservedRunningTime="2025-11-22 09:05:04.288005243 +0000 UTC m=+100.430507534"
Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.302120 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-qsptv"
podStartSLOduration=83.302111517 podStartE2EDuration="1m23.302111517s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.301715369 +0000 UTC m=+100.444217660" watchObservedRunningTime="2025-11-22 09:05:04.302111517 +0000 UTC m=+100.444613798" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.310353 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-2s9rh" podStartSLOduration=83.310332639 podStartE2EDuration="1m23.310332639s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.310205248 +0000 UTC m=+100.452707539" watchObservedRunningTime="2025-11-22 09:05:04.310332639 +0000 UTC m=+100.452834930" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.318695 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.318724 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.318733 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.318745 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.318754 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.321185 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=82.321174991 podStartE2EDuration="1m22.321174991s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:04.320647416 +0000 UTC m=+100.463149706" watchObservedRunningTime="2025-11-22 09:05:04.321174991 +0000 UTC m=+100.463677283" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.420715 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.420757 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.420768 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.420782 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.420792 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.522569 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.522605 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.522613 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.522626 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.522635 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.625090 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.625124 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.625135 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.625146 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.625154 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.726566 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.726598 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.726606 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.726619 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.726627 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.828207 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.828245 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.828255 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.828269 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.828279 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.929839 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.929896 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.929905 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.929920 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:04 crc kubenswrapper[4693]: I1122 09:05:04.929929 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:04Z","lastTransitionTime":"2025-11-22T09:05:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.031836 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.031883 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.031892 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.031906 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.031916 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.133474 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.133509 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.133519 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.133533 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.133543 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.145769 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:05 crc kubenswrapper[4693]: E1122 09:05:05.145896 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.236427 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.236875 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.236960 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.237027 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.237085 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.338884 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.339386 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.339464 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.339544 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.339614 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.441690 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.441715 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.441723 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.441732 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.441740 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.543150 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.543189 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.543199 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.543214 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.543223 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.645227 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.645272 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.645280 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.645291 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.645299 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.746825 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.746872 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.746882 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.746894 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.746902 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.848002 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.848029 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.848038 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.848051 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.848059 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.950271 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.950299 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.950310 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.950322 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:05 crc kubenswrapper[4693]: I1122 09:05:05.950329 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:05Z","lastTransitionTime":"2025-11-22T09:05:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.051557 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.051583 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.051592 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.051602 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.051611 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.146596 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.146681 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:06 crc kubenswrapper[4693]: E1122 09:05:06.146788 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.146808 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:06 crc kubenswrapper[4693]: E1122 09:05:06.146925 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:06 crc kubenswrapper[4693]: E1122 09:05:06.146989 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.153307 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.153344 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.153352 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.153365 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.153383 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.254798 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.254835 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.254876 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.254896 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.254907 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.356700 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.356724 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.356731 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.356740 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.356747 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.458766 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.458901 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.458976 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.459040 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.459106 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.560962 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.561177 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.561264 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.561342 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.561427 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.662784 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.662809 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.662817 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.662830 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.662838 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.763955 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.763996 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.764004 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.764018 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.764029 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.865326 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.865358 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.865367 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.865387 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.865395 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.951018 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.951051 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.951061 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.951076 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.951085 4693 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-22T09:05:06Z","lastTransitionTime":"2025-11-22T09:05:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.977430 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq"] Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.977707 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.979659 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.980142 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.980615 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 22 09:05:06 crc kubenswrapper[4693]: I1122 09:05:06.980631 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.097590 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/756dde41-16fc-4ad5-9e96-da481eeb2abe-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.097623 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/756dde41-16fc-4ad5-9e96-da481eeb2abe-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.097661 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-ssl-certs\") pod 
\"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.097680 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.097696 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/756dde41-16fc-4ad5-9e96-da481eeb2abe-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.146203 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:07 crc kubenswrapper[4693]: E1122 09:05:07.146474 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.197944 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.197975 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.197996 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/756dde41-16fc-4ad5-9e96-da481eeb2abe-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.198039 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/756dde41-16fc-4ad5-9e96-da481eeb2abe-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.198061 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/756dde41-16fc-4ad5-9e96-da481eeb2abe-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.198055 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.198157 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/756dde41-16fc-4ad5-9e96-da481eeb2abe-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.198936 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/756dde41-16fc-4ad5-9e96-da481eeb2abe-service-ca\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.202683 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/756dde41-16fc-4ad5-9e96-da481eeb2abe-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.209968 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/756dde41-16fc-4ad5-9e96-da481eeb2abe-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-p4rcq\" (UID: \"756dde41-16fc-4ad5-9e96-da481eeb2abe\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.287218 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.521928 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" event={"ID":"756dde41-16fc-4ad5-9e96-da481eeb2abe","Type":"ContainerStarted","Data":"46224b9d5288b4ad1b2593471c527077136e48f8a34bf1b75dcc3cd10f3ea242"} Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.522122 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" event={"ID":"756dde41-16fc-4ad5-9e96-da481eeb2abe","Type":"ContainerStarted","Data":"bd65c250fd9ee3c49a7b256d2e03bc087d7f7835698e53d5ef74b77cd022b307"} Nov 22 09:05:07 crc kubenswrapper[4693]: I1122 09:05:07.531759 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-p4rcq" podStartSLOduration=86.53174494699999 podStartE2EDuration="1m26.531744947s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:07.531092515 +0000 UTC m=+103.673594806" watchObservedRunningTime="2025-11-22 09:05:07.531744947 +0000 UTC m=+103.674247239" Nov 22 09:05:08 crc kubenswrapper[4693]: I1122 09:05:08.146460 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:08 crc kubenswrapper[4693]: I1122 09:05:08.146494 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:08 crc kubenswrapper[4693]: I1122 09:05:08.146509 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:08 crc kubenswrapper[4693]: E1122 09:05:08.146562 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:08 crc kubenswrapper[4693]: E1122 09:05:08.146638 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:08 crc kubenswrapper[4693]: E1122 09:05:08.146693 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:09 crc kubenswrapper[4693]: I1122 09:05:09.145737 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:09 crc kubenswrapper[4693]: E1122 09:05:09.146116 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:10 crc kubenswrapper[4693]: I1122 09:05:10.146302 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:10 crc kubenswrapper[4693]: I1122 09:05:10.146329 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:10 crc kubenswrapper[4693]: I1122 09:05:10.146349 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:10 crc kubenswrapper[4693]: E1122 09:05:10.146432 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:10 crc kubenswrapper[4693]: E1122 09:05:10.146485 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:10 crc kubenswrapper[4693]: E1122 09:05:10.146538 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:11 crc kubenswrapper[4693]: I1122 09:05:11.146373 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:11 crc kubenswrapper[4693]: E1122 09:05:11.146606 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:12 crc kubenswrapper[4693]: I1122 09:05:12.146084 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:12 crc kubenswrapper[4693]: E1122 09:05:12.146176 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:12 crc kubenswrapper[4693]: I1122 09:05:12.146343 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:12 crc kubenswrapper[4693]: E1122 09:05:12.146392 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:12 crc kubenswrapper[4693]: I1122 09:05:12.146556 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:12 crc kubenswrapper[4693]: E1122 09:05:12.146608 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:13 crc kubenswrapper[4693]: I1122 09:05:13.145829 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:13 crc kubenswrapper[4693]: E1122 09:05:13.145956 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.145769 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.145886 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.145937 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:14 crc kubenswrapper[4693]: E1122 09:05:14.146589 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:14 crc kubenswrapper[4693]: E1122 09:05:14.147008 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:14 crc kubenswrapper[4693]: E1122 09:05:14.147057 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.147223 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:05:14 crc kubenswrapper[4693]: E1122 09:05:14.147355 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-852ps_openshift-ovn-kubernetes(2fa68d41-61c5-4781-8984-add9804c1b4b)\"" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.537382 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/1.log" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.537727 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/0.log" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.537763 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7" containerID="6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f" exitCode=1 Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.537786 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerDied","Data":"6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f"} Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.537826 4693 scope.go:117] "RemoveContainer" containerID="b4e11e4823688268ea9cae9b2aad081db345bde5ae16fd30fcc2b08fb1d5d663" Nov 22 09:05:14 crc kubenswrapper[4693]: I1122 09:05:14.538097 4693 scope.go:117] "RemoveContainer" containerID="6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f" Nov 22 09:05:14 crc kubenswrapper[4693]: E1122 
09:05:14.538224 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-2s9rh_openshift-multus(9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7)\"" pod="openshift-multus/multus-2s9rh" podUID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7" Nov 22 09:05:15 crc kubenswrapper[4693]: I1122 09:05:15.145688 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:15 crc kubenswrapper[4693]: E1122 09:05:15.145777 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:15 crc kubenswrapper[4693]: I1122 09:05:15.543641 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/1.log" Nov 22 09:05:16 crc kubenswrapper[4693]: I1122 09:05:16.146263 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:16 crc kubenswrapper[4693]: I1122 09:05:16.146339 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:16 crc kubenswrapper[4693]: I1122 09:05:16.146484 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:16 crc kubenswrapper[4693]: E1122 09:05:16.146471 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:16 crc kubenswrapper[4693]: E1122 09:05:16.146554 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:16 crc kubenswrapper[4693]: E1122 09:05:16.146609 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:17 crc kubenswrapper[4693]: I1122 09:05:17.146520 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:17 crc kubenswrapper[4693]: E1122 09:05:17.146710 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:18 crc kubenswrapper[4693]: I1122 09:05:18.146682 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:18 crc kubenswrapper[4693]: E1122 09:05:18.146770 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:18 crc kubenswrapper[4693]: I1122 09:05:18.146790 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:18 crc kubenswrapper[4693]: I1122 09:05:18.146886 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:18 crc kubenswrapper[4693]: E1122 09:05:18.146935 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:18 crc kubenswrapper[4693]: E1122 09:05:18.147133 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:19 crc kubenswrapper[4693]: I1122 09:05:19.146366 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:19 crc kubenswrapper[4693]: E1122 09:05:19.146467 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:20 crc kubenswrapper[4693]: I1122 09:05:20.145700 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:20 crc kubenswrapper[4693]: I1122 09:05:20.145731 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:20 crc kubenswrapper[4693]: I1122 09:05:20.145708 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:20 crc kubenswrapper[4693]: E1122 09:05:20.145806 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:20 crc kubenswrapper[4693]: E1122 09:05:20.145876 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:20 crc kubenswrapper[4693]: E1122 09:05:20.145919 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:21 crc kubenswrapper[4693]: I1122 09:05:21.145699 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:21 crc kubenswrapper[4693]: E1122 09:05:21.145805 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:22 crc kubenswrapper[4693]: I1122 09:05:22.146552 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:22 crc kubenswrapper[4693]: I1122 09:05:22.146584 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:22 crc kubenswrapper[4693]: E1122 09:05:22.146637 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:22 crc kubenswrapper[4693]: E1122 09:05:22.146758 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:22 crc kubenswrapper[4693]: I1122 09:05:22.146866 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:22 crc kubenswrapper[4693]: E1122 09:05:22.146921 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:23 crc kubenswrapper[4693]: I1122 09:05:23.146500 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:23 crc kubenswrapper[4693]: E1122 09:05:23.146638 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:24 crc kubenswrapper[4693]: I1122 09:05:24.145708 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:24 crc kubenswrapper[4693]: I1122 09:05:24.145721 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:24 crc kubenswrapper[4693]: I1122 09:05:24.146607 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:24 crc kubenswrapper[4693]: E1122 09:05:24.146599 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:24 crc kubenswrapper[4693]: E1122 09:05:24.146699 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:24 crc kubenswrapper[4693]: E1122 09:05:24.146745 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:24 crc kubenswrapper[4693]: E1122 09:05:24.178487 4693 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 22 09:05:24 crc kubenswrapper[4693]: E1122 09:05:24.226485 4693 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 09:05:25 crc kubenswrapper[4693]: I1122 09:05:25.145928 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:25 crc kubenswrapper[4693]: E1122 09:05:25.146033 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.145975 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.146004 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.146044 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:26 crc kubenswrapper[4693]: E1122 09:05:26.146070 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:26 crc kubenswrapper[4693]: E1122 09:05:26.146136 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:26 crc kubenswrapper[4693]: E1122 09:05:26.146354 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.146679 4693 scope.go:117] "RemoveContainer" containerID="6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.567039 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/1.log" Nov 22 09:05:26 crc kubenswrapper[4693]: I1122 09:05:26.567086 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerStarted","Data":"1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e"} Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.146472 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:27 crc kubenswrapper[4693]: E1122 09:05:27.147060 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.146929 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.570714 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/3.log" Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.572979 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerStarted","Data":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.573331 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.589939 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podStartSLOduration=106.589927171 podStartE2EDuration="1m46.589927171s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:27.589451873 +0000 UTC m=+123.731954165" watchObservedRunningTime="2025-11-22 09:05:27.589927171 +0000 UTC m=+123.732429462" Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.759915 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-t4blm"] Nov 22 09:05:27 crc kubenswrapper[4693]: I1122 09:05:27.760022 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:27 crc kubenswrapper[4693]: E1122 09:05:27.760096 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:28 crc kubenswrapper[4693]: I1122 09:05:28.146736 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:28 crc kubenswrapper[4693]: I1122 09:05:28.146745 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:28 crc kubenswrapper[4693]: E1122 09:05:28.146870 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:28 crc kubenswrapper[4693]: I1122 09:05:28.146751 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:28 crc kubenswrapper[4693]: E1122 09:05:28.146950 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:28 crc kubenswrapper[4693]: E1122 09:05:28.147092 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:29 crc kubenswrapper[4693]: I1122 09:05:29.146681 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:29 crc kubenswrapper[4693]: E1122 09:05:29.146786 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:29 crc kubenswrapper[4693]: E1122 09:05:29.227899 4693 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 09:05:30 crc kubenswrapper[4693]: I1122 09:05:30.146630 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:30 crc kubenswrapper[4693]: I1122 09:05:30.146692 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:30 crc kubenswrapper[4693]: E1122 09:05:30.146750 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:30 crc kubenswrapper[4693]: I1122 09:05:30.146826 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:30 crc kubenswrapper[4693]: E1122 09:05:30.146940 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:30 crc kubenswrapper[4693]: E1122 09:05:30.147121 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:31 crc kubenswrapper[4693]: I1122 09:05:31.145750 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:31 crc kubenswrapper[4693]: E1122 09:05:31.145871 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:32 crc kubenswrapper[4693]: I1122 09:05:32.147396 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:32 crc kubenswrapper[4693]: I1122 09:05:32.147416 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:32 crc kubenswrapper[4693]: I1122 09:05:32.147467 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:32 crc kubenswrapper[4693]: E1122 09:05:32.147507 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:32 crc kubenswrapper[4693]: E1122 09:05:32.147599 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:32 crc kubenswrapper[4693]: E1122 09:05:32.147671 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:33 crc kubenswrapper[4693]: I1122 09:05:33.146617 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:33 crc kubenswrapper[4693]: E1122 09:05:33.146720 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-t4blm" podUID="fbc29b7c-60ea-4d65-ae31-fee4b8e7f833" Nov 22 09:05:34 crc kubenswrapper[4693]: I1122 09:05:34.145738 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:34 crc kubenswrapper[4693]: I1122 09:05:34.145747 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:34 crc kubenswrapper[4693]: E1122 09:05:34.146507 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 22 09:05:34 crc kubenswrapper[4693]: E1122 09:05:34.146674 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 22 09:05:34 crc kubenswrapper[4693]: I1122 09:05:34.146684 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:34 crc kubenswrapper[4693]: E1122 09:05:34.146870 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 22 09:05:35 crc kubenswrapper[4693]: I1122 09:05:35.146423 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm" Nov 22 09:05:35 crc kubenswrapper[4693]: I1122 09:05:35.148171 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 22 09:05:35 crc kubenswrapper[4693]: I1122 09:05:35.148669 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.146218 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.146248 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.146218 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.147530 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.148170 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.148210 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 22 09:05:36 crc kubenswrapper[4693]: I1122 09:05:36.148444 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.381033 4693 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.402483 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.403055 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-dgfk9"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.403203 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.403828 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.404289 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"] Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.404551 4693 reflector.go:561] object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z": failed to list *v1.Secret: secrets "openshift-config-operator-dockercfg-7pc5z" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.404589 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-config-operator-dockercfg-7pc5z\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-config-operator-dockercfg-7pc5z\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.404645 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.404935 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wg5vx"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.405251 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.405658 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"] Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.405875 4693 reflector.go:561] object-"openshift-config-operator"/"config-operator-serving-cert": failed to list *v1.Secret: secrets "config-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.405905 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"config-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"config-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.405968 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.406596 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"] Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.406679 4693 reflector.go:561] object-"openshift-config-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.406705 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.406834 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.407266 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-chjcb"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.407639 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.407663 4693 reflector.go:561] object-"openshift-console-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.407683 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.410773 4693 reflector.go:561] object-"openshift-console-operator"/"serving-cert": failed to list *v1.Secret: secrets "serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.410797 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.410909 4693 reflector.go:561] object-"openshift-console"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.410917 4693 reflector.go:561] object-"openshift-dns-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.410931 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.410933 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found 
between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.410984 4693 reflector.go:561] object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert": failed to list *v1.Secret: secrets "openshift-controller-manager-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411000 4693 reflector.go:561] object-"openshift-console-operator"/"console-operator-config": failed to list *v1.ConfigMap: configmaps "console-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411014 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"console-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"console-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.410997 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411032 4693 reflector.go:561] object-"openshift-console"/"console-oauth-config": failed to list *v1.Secret: secrets "console-oauth-config" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411043 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"console-oauth-config\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"console-oauth-config\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411051 4693 reflector.go:561] object-"openshift-console"/"service-ca": failed to list *v1.ConfigMap: configmaps "service-ca" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411061 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"service-ca\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"service-ca\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the 
namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411094 4693 reflector.go:561] object-"openshift-apiserver-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411103 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411119 4693 reflector.go:561] object-"openshift-controller-manager-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411131 4693 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv": failed to list *v1.Secret: secrets "openshift-apiserver-operator-dockercfg-xtcjv" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411142 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-dockercfg-xtcjv\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-dockercfg-xtcjv\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411130 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411173 4693 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7": failed to list *v1.Secret: secrets "machine-api-operator-dockercfg-mfbb7" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411181 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-dockercfg-mfbb7\": Failed to watch *v1.Secret: failed to list 
*v1.Secret: secrets \"machine-api-operator-dockercfg-mfbb7\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411207 4693 reflector.go:561] object-"openshift-machine-api"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411214 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411247 4693 reflector.go:561] object-"openshift-controller-manager-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411257 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411286 4693 reflector.go:561] object-"openshift-console"/"trusted-ca-bundle": failed to list *v1.ConfigMap: configmaps "trusted-ca-bundle" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411294 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"trusted-ca-bundle\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"trusted-ca-bundle\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411319 4693 reflector.go:561] object-"openshift-console"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411328 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411337 4693 reflector.go:561] object-"openshift-config-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-config-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411351 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-config-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-config-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.411367 4693 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-tls": failed to list *v1.Secret: secrets "machine-api-operator-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.411375 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"machine-api-operator-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.412855 4693 reflector.go:561] object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw": failed to list *v1.Secret: secrets "openshift-controller-manager-operator-dockercfg-vw8fw" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-controller-manager-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.412887 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-dockercfg-vw8fw\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-controller-manager-operator-dockercfg-vw8fw\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-controller-manager-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.412871 4693 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert": failed to list *v1.Secret: secrets "openshift-apiserver-operator-serving-cert" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.412911 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-serving-cert\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"openshift-apiserver-operator-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413037 4693 reflector.go:561] object-"openshift-console"/"oauth-serving-cert": failed to list *v1.ConfigMap: configmaps "oauth-serving-cert" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413049 4693 reflector.go:561] object-"openshift-machine-api"/"kube-rbac-proxy": failed to list *v1.ConfigMap: configmaps "kube-rbac-proxy" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413060 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console\"/\"oauth-serving-cert\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"oauth-serving-cert\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413068 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"kube-rbac-proxy\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-rbac-proxy\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413107 4693 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413114 4693 reflector.go:561] object-"openshift-dns-operator"/"metrics-tls": failed to list *v1.Secret: secrets "metrics-tls" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-dns-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413119 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413128 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns-operator\"/\"metrics-tls\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"metrics-tls\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-dns-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413159 4693 reflector.go:561] object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-apiserver-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-apiserver-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413187 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-apiserver-operator\"/\"openshift-apiserver-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-apiserver-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-apiserver-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413435 4693 reflector.go:561] object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config": failed to list *v1.ConfigMap: configmaps "openshift-controller-manager-operator-config" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-controller-manager-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413456 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-controller-manager-operator\"/\"openshift-controller-manager-operator-config\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-controller-manager-operator-config\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-controller-manager-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413698 4693 reflector.go:561] object-"openshift-dns-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-dns-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.413726 4693 reflector.go:561] object-"openshift-machine-api"/"machine-api-operator-images": failed to list *v1.ConfigMap: configmaps "machine-api-operator-images" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413742 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"machine-api-operator-images\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"machine-api-operator-images\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.413873 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.414085 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.414092 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr"
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.413722 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-dns-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-dns-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.414331 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.414682 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.414940 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.415307 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.415646 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.416130 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.416457 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427515 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g7zss\" (UniqueName: \"kubernetes.io/projected/f0177d6a-4233-4759-9f66-facba4d65adf-kube-api-access-g7zss\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427645 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427729 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427817 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427905 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/db30dd05-0c41-415a-aa26-e78c9ebae1bc-machine-approver-tls\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.427977 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhjnd\" (UniqueName: \"kubernetes.io/projected/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-kube-api-access-dhjnd\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428037 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wtbn\" (UniqueName: \"kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428105 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428184 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428247 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8b4x8\" (UniqueName: \"kubernetes.io/projected/0b941991-050f-44f9-a7e8-eb0b1ae14ade-kube-api-access-8b4x8\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428307 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-auth-proxy-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428372 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428433 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c428410b-f6ac-47d2-aae7-76f314f718db-audit-dir\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428496 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428552 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428615 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428699 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428763 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428820 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428899 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hswb2\" (UniqueName: \"kubernetes.io/projected/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-kube-api-access-hswb2\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.428959 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429052 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-audit-policies\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429123 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429183 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429243 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429329 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-encryption-config\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429399 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429461 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429538 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429604 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rspms\" (UniqueName: \"kubernetes.io/projected/db30dd05-0c41-415a-aa26-e78c9ebae1bc-kube-api-access-rspms\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429680 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5ncsx\" (UniqueName: \"kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429747 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-trusted-ca\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429811 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xx78\" (UniqueName: \"kubernetes.io/projected/a6822f49-a368-49db-8f43-63abe760e740-kube-api-access-9xx78\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429895 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8czr\" (UniqueName: \"kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.429956 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bng4x\" (UniqueName: \"kubernetes.io/projected/56f55ada-5668-4a07-888e-1c578214f660-kube-api-access-bng4x\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430024 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430082 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-client\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430147 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5rgfk\" (UniqueName: \"kubernetes.io/projected/c428410b-f6ac-47d2-aae7-76f314f718db-kube-api-access-5rgfk\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.430189 4693 reflector.go:561] object-"openshift-machine-api"/"openshift-service-ca.crt": failed to list *v1.ConfigMap: configmaps "openshift-service-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-machine-api": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.430223 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-machine-api\"/\"openshift-service-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"openshift-service-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-machine-api\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: W1122 09:05:37.430266 4693 reflector.go:561] object-"openshift-console-operator"/"kube-root-ca.crt": failed to list *v1.ConfigMap: configmaps "kube-root-ca.crt" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "openshift-console-operator": no relationship found between node 'crc' and this object
Nov 22 09:05:37 crc kubenswrapper[4693]: E1122 09:05:37.430276 4693 reflector.go:158] "Unhandled Error" err="object-\"openshift-console-operator\"/\"kube-root-ca.crt\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"kube-root-ca.crt\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"openshift-console-operator\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430343 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430405 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430471 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430528 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-serving-cert\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430603 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430678 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430735 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430802 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430879 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430476 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.430601 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.431027 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.431896 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432023 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432224 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432397 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432450 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432488 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432535 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.432678 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.439014 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.439180 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.442287 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.439352 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.439864 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.440195 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.440981 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.442401 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rz6dr"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.442464 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.442484 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.442758 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.449900 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.449997 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450077 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450121 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450188 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450082 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450263 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450265 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450765 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450288 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.451076 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-984ks"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.454672 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sthb9"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.450317 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.452172 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.454973 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.452256 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.455074 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.455105 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-984ks"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.455401 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.455454 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.460244 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.460321 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.455578 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.460332 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.460367 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.460463 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.463384 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.463417 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.463456 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.463950 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.464148 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.465535 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-m97gh"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.465691 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.464196 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.464223 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.466100 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.464235 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.464238 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.466309 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.466370 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.466705 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.466877 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.467903 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.468355 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470291 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470448 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470498 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470556 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470724 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470912 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470959 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.470919 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.471175 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.471243 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.471985 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472257 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472345 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472393 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472467 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472570 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472583 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.472759 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.473215 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.473676 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.475322 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.475326 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.475405 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.476234 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.477522 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.478156 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.478584 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.479098 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.486099 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.489725 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.514481 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c5cns"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.514901 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wg5vx"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.514932 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.514975 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c5cns"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.515246 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.515771 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-chjcb"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.516396 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.516540 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.517019 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.517100 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.517230 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.517028 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.517388 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.518035 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-2z7n2"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.518362 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.518758 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.519161 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.519477 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.520073 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.520186 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.520588 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.521114 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.521435 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.521583 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.522011 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.525380 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.525510 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.525967 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.526136 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-dgfk9"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.526926 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.527591 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.528684 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.531730 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-984ks"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.531751 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532170 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532251 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8b4x8\" (UniqueName: \"kubernetes.io/projected/0b941991-050f-44f9-a7e8-eb0b1ae14ade-kube-api-access-8b4x8\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532330 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-auth-proxy-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532402 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c428410b-f6ac-47d2-aae7-76f314f718db-audit-dir\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532475 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/c428410b-f6ac-47d2-aae7-76f314f718db-audit-dir\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532486 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532541 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532565 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532582 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532603 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532621 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532645 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c5cns"]
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532659 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4385a1e7-41f2-423b-8a29-749bbb181512-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532681 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532697 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-hswb2\" (UniqueName: \"kubernetes.io/projected/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-kube-api-access-hswb2\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532713 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532750 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532767 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532785 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-audit-policies\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532805 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532824 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-encryption-config\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532852 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532871 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"console-f9d7485db-m5ldq\" (UID: 
\"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532888 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532903 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rspms\" (UniqueName: \"kubernetes.io/projected/db30dd05-0c41-415a-aa26-e78c9ebae1bc-kube-api-access-rspms\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532917 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5ncsx\" (UniqueName: \"kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532937 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8czr\" (UniqueName: \"kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532952 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bng4x\" (UniqueName: \"kubernetes.io/projected/56f55ada-5668-4a07-888e-1c578214f660-kube-api-access-bng4x\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532966 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532981 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2msd\" (UniqueName: \"kubernetes.io/projected/4385a1e7-41f2-423b-8a29-749bbb181512-kube-api-access-s2msd\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.532998 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-trusted-ca\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:37 
crc kubenswrapper[4693]: I1122 09:05:37.533013 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xx78\" (UniqueName: \"kubernetes.io/projected/a6822f49-a368-49db-8f43-63abe760e740-kube-api-access-9xx78\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533016 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-auth-proxy-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533029 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-client\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533065 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5rgfk\" (UniqueName: \"kubernetes.io/projected/c428410b-f6ac-47d2-aae7-76f314f718db-kube-api-access-5rgfk\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533083 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533096 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533113 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533127 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-serving-cert\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533143 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") pod 
\"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533160 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533174 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533190 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533214 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533232 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g7zss\" (UniqueName: \"kubernetes.io/projected/f0177d6a-4233-4759-9f66-facba4d65adf-kube-api-access-g7zss\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533247 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533265 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533285 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" Nov 22 09:05:37 crc 
kubenswrapper[4693]: I1122 09:05:37.533299 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/db30dd05-0c41-415a-aa26-e78c9ebae1bc-machine-approver-tls\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533311 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db30dd05-0c41-415a-aa26-e78c9ebae1bc-config\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533315 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhjnd\" (UniqueName: \"kubernetes.io/projected/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-kube-api-access-dhjnd\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533362 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4wtbn\" (UniqueName: \"kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533384 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533727 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.533753 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.534009 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-audit-policies\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.534341 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.534462 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.534734 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535351 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535378 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535473 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535681 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535785 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-trusted-ca\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.535805 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-available-featuregates\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.536328 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.536674 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.537081 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rz6dr"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545051 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545067 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545077 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sthb9"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545088 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545096 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545104 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545112 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.542596 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-encryption-config\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.537378 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-etcd-client\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.537825 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.538836 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/db30dd05-0c41-415a-aa26-e78c9ebae1bc-machine-approver-tls\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.540014 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.545644 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.554125 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.556864 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c428410b-f6ac-47d2-aae7-76f314f718db-serving-cert\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.556907 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-m97gh"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.559342 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-f94z5"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.559990 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.560193 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.561050 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.562210 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.564649 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.566192 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.567175 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.568103 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f94z5"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.569138 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.570066 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.570969 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.572707 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.573852 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.574470 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.575302 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.576150 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.576971 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.577753 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-6bbcf"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.578286 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.578595 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wwrvj"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.579379 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wwrvj"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.579434 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.584885 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.604970 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.624837 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.634569 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4385a1e7-41f2-423b-8a29-749bbb181512-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.634706 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2msd\" (UniqueName: \"kubernetes.io/projected/4385a1e7-41f2-423b-8a29-749bbb181512-kube-api-access-s2msd\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.635908 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-rv5vl"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.637219 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.639037 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rv5vl"] Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.649153 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.664677 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.684347 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.704468 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.724328 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.744360 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.764857 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.784739 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.804755 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.824289 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.845131 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.864984 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.884995 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.905131 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.924907 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.944427 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.964350 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 22 09:05:37 crc kubenswrapper[4693]: I1122 09:05:37.985473 4693 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.004187 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.025426 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.044856 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.047363 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/4385a1e7-41f2-423b-8a29-749bbb181512-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.064908 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.104421 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.125265 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.150536 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.165832 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.185094 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.205529 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.229360 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.244916 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.264571 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.284706 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.304286 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.324462 4693 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.344617 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.364536 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.384343 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.405237 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.424956 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.445483 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.464968 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.485089 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.505345 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.524022 4693 request.go:700] Waited for 1.006536108s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-operator/secrets?fieldSelector=metadata.name%3Dingress-operator-dockercfg-7lnqk&limit=500&resourceVersion=0 Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.525028 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.533128 4693 secret.go:188] Couldn't get secret openshift-console-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.533169 4693 configmap.go:193] Couldn't get configMap openshift-machine-api/kube-rbac-proxy: failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.533186 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert podName:0b941991-050f-44f9-a7e8-eb0b1ae14ade nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.033171621 +0000 UTC m=+135.175673912 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert") pod "console-operator-58897d9998-wg5vx" (UID: "0b941991-050f-44f9-a7e8-eb0b1ae14ade") : failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.533215 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config podName:56f55ada-5668-4a07-888e-1c578214f660 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.033200987 +0000 UTC m=+135.175703278 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config") pod "machine-api-operator-5694c8668f-chjcb" (UID: "56f55ada-5668-4a07-888e-1c578214f660") : failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534257 4693 configmap.go:193] Couldn't get configMap openshift-console/oauth-serving-cert: failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534296 4693 secret.go:188] Couldn't get secret openshift-console/console-oauth-config: failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534307 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034294371 +0000 UTC m=+135.176796662 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "oauth-serving-cert" (UniqueName: "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534310 4693 configmap.go:193] Couldn't get configMap openshift-console-operator/console-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534316 4693 secret.go:188] Couldn't get secret openshift-machine-api/machine-api-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534266 4693 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534347 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.03433061 +0000 UTC m=+135.176832900 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534355 4693 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534363 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert podName:f0177d6a-4233-4759-9f66-facba4d65adf nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034355607 +0000 UTC m=+135.176857898 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-k28v7" (UID: "f0177d6a-4233-4759-9f66-facba4d65adf") : failed to sync secret cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534364 4693 configmap.go:193] Couldn't get configMap openshift-machine-api/machine-api-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534378 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config podName:f0177d6a-4233-4759-9f66-facba4d65adf nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034370415 +0000 UTC m=+135.176872706 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config") pod "openshift-apiserver-operator-796bbdcf4f-k28v7" (UID: "f0177d6a-4233-4759-9f66-facba4d65adf") : failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534393 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config podName:0b941991-050f-44f9-a7e8-eb0b1ae14ade nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034387447 +0000 UTC m=+135.176889738 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config") pod "console-operator-58897d9998-wg5vx" (UID: "0b941991-050f-44f9-a7e8-eb0b1ae14ade") : failed to sync configmap cache: timed out waiting for the condition Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534411 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images podName:56f55ada-5668-4a07-888e-1c578214f660 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034399249 +0000 UTC m=+135.176901540 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images") pod "machine-api-operator-5694c8668f-chjcb" (UID: "56f55ada-5668-4a07-888e-1c578214f660") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.534427 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls podName:56f55ada-5668-4a07-888e-1c578214f660 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.034420249 +0000 UTC m=+135.176922540 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "machine-api-operator-tls" (UniqueName: "kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls") pod "machine-api-operator-5694c8668f-chjcb" (UID: "56f55ada-5668-4a07-888e-1c578214f660") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535494 4693 configmap.go:193] Couldn't get configMap openshift-console/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535513 4693 secret.go:188] Couldn't get secret openshift-controller-manager-operator/openshift-controller-manager-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535499 4693 secret.go:188] Couldn't get secret openshift-dns-operator/metrics-tls: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535546 4693 configmap.go:193] Couldn't get configMap openshift-console/service-ca: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535552 4693 secret.go:188] Couldn't get secret openshift-config-operator/config-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535554 4693 configmap.go:193] Couldn't get configMap openshift-controller-manager-operator/openshift-controller-manager-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535536 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.03552786 +0000 UTC m=+135.178030151 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535599 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert podName:a6822f49-a368-49db-8f43-63abe760e740 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.035593875 +0000 UTC m=+135.178096166 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert") pod "openshift-controller-manager-operator-756b6f6bc6-rzchv" (UID: "a6822f49-a368-49db-8f43-63abe760e740") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535611 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls podName:f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.035604404 +0000 UTC m=+135.178106696 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls") pod "dns-operator-744455d44c-dgfk9" (UID: "f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535621 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.035616467 +0000 UTC m=+135.178118758 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "service-ca" (UniqueName: "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535631 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert podName:d019eb1c-20c3-4662-87b1-2dbd0b92ce1c nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.035627438 +0000 UTC m=+135.178129729 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert") pod "openshift-config-operator-7777fb866f-2d96p" (UID: "d019eb1c-20c3-4662-87b1-2dbd0b92ce1c") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: E1122 09:05:38.535653 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config podName:a6822f49-a368-49db-8f43-63abe760e740 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:39.035649129 +0000 UTC m=+135.178151421 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config") pod "openshift-controller-manager-operator-756b6f6bc6-rzchv" (UID: "a6822f49-a368-49db-8f43-63abe760e740") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.549420 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.564946 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.585586 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.604732 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.625263 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.644559 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.664775 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.684405 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.705386 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.724460 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.744953 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.764988 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.784410 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.805077 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.825168 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.844678 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.865395 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.885010 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.905228 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.924554 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.944889 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.965220 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config"
Nov 22 09:05:38 crc kubenswrapper[4693]: I1122 09:05:38.984368 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.005171 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.025336 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.044449 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046492 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046526 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046598 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046621 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046646 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046661 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046682 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046715 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046732 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046772 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046795 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046811 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046828 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046879 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.046898 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.065024 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.085237 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.105155 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.124434 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.145213 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.165747 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.185092 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.204651 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.315311 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5rgfk\" (UniqueName: \"kubernetes.io/projected/c428410b-f6ac-47d2-aae7-76f314f718db-kube-api-access-5rgfk\") pod \"apiserver-7bbb656c7d-w6rvg\" (UID: \"c428410b-f6ac-47d2-aae7-76f314f718db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.325176 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.335281 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8czr\" (UniqueName: \"kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr\") pod \"route-controller-manager-6576b87f9c-dd9wx\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.376509 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5ncsx\" (UniqueName: \"kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx\") pod \"controller-manager-879f6c89f-dgnsw\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") " pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.439287 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rspms\" (UniqueName: \"kubernetes.io/projected/db30dd05-0c41-415a-aa26-e78c9ebae1bc-kube-api-access-rspms\") pod \"machine-approver-56656f9798-5m8d6\" (UID: \"db30dd05-0c41-415a-aa26-e78c9ebae1bc\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.454387 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"]
Nov 22 09:05:39 crc kubenswrapper[4693]: W1122 09:05:39.459703 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc428410b_f6ac_47d2_aae7_76f314f718db.slice/crio-5ba34e58dd989ba9e77ef12c9806e1142c5ddc7731f02bf29e0efb9e80e5cda9 WatchSource:0}: Error finding container 5ba34e58dd989ba9e77ef12c9806e1142c5ddc7731f02bf29e0efb9e80e5cda9: Status 404 returned error can't find the container with id 5ba34e58dd989ba9e77ef12c9806e1142c5ddc7731f02bf29e0efb9e80e5cda9
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.464316 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.484525 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.504953 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.524522 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.543796 4693 request.go:700] Waited for 1.965315057s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-machine-config-operator/secrets?fieldSelector=metadata.name%3Dmachine-config-server-tls&limit=500&resourceVersion=0
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.544726 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.565074 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.583868 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.584941 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.600026 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" event={"ID":"c428410b-f6ac-47d2-aae7-76f314f718db","Type":"ContainerStarted","Data":"48d4d93208bdf1afbb7e3f824c3375dce34b1f9277906e3bf0adaabca639e916"}
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.600065 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" event={"ID":"c428410b-f6ac-47d2-aae7-76f314f718db","Type":"ContainerStarted","Data":"5ba34e58dd989ba9e77ef12c9806e1142c5ddc7731f02bf29e0efb9e80e5cda9"}
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.605114 4693 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.618676 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.624664 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.630877 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.644721 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: W1122 09:05:39.669985 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddb30dd05_0c41_415a_aa26_e78c9ebae1bc.slice/crio-73a8cdc1d7d31683f45108c95cf0effb4b880e992cbfccd39e467bf80ec78e94 WatchSource:0}: Error finding container 73a8cdc1d7d31683f45108c95cf0effb4b880e992cbfccd39e467bf80ec78e94: Status 404 returned error can't find the container with id 73a8cdc1d7d31683f45108c95cf0effb4b880e992cbfccd39e467bf80ec78e94
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.678339 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2msd\" (UniqueName: \"kubernetes.io/projected/4385a1e7-41f2-423b-8a29-749bbb181512-kube-api-access-s2msd\") pod \"package-server-manager-789f6589d5-tfptk\" (UID: \"4385a1e7-41f2-423b-8a29-749bbb181512\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.684552 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.694781 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"]
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.705790 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh"
Nov 22 09:05:39 crc kubenswrapper[4693]: W1122 09:05:39.706946 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc9649ff6_4471_48d9_b751_56ac85bd9c91.slice/crio-53a0330b188e3e7654370757e8c67a79dec1dd65fae989d6dae409f943241bd1 WatchSource:0}: Error finding container 53a0330b188e3e7654370757e8c67a79dec1dd65fae989d6dae409f943241bd1: Status 404 returned error can't find the container with id 53a0330b188e3e7654370757e8c67a79dec1dd65fae989d6dae409f943241bd1
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.724973 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.725699 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.728611 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"]
Nov 22 09:05:39 crc kubenswrapper[4693]: W1122 09:05:39.740457 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24790c77_ff73_4528_a742_ab163edd8ac8.slice/crio-235c5f62adbd87296ff1348b625c28be64a50f40567c7b2c18ada111282b52c1 WatchSource:0}: Error finding container 235c5f62adbd87296ff1348b625c28be64a50f40567c7b2c18ada111282b52c1: Status 404 returned error can't find the container with id 235c5f62adbd87296ff1348b625c28be64a50f40567c7b2c18ada111282b52c1
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.765601 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.770322 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/56f55ada-5668-4a07-888e-1c578214f660-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.784924 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.789993 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a6822f49-a368-49db-8f43-63abe760e740-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.804756 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.811423 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-serving-cert\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.825003 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.845752 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.846186 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"]
Nov 22 09:05:39 crc kubenswrapper[4693]: W1122 09:05:39.851586 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4385a1e7_41f2_423b_8a29_749bbb181512.slice/crio-22feef1155ef8fc8eb9696db517b341d05c60eff2286cafd2895dc4d377665c2 WatchSource:0}: Error finding container 22feef1155ef8fc8eb9696db517b341d05c60eff2286cafd2895dc4d377665c2: Status 404 returned error can't find the container with id 22feef1155ef8fc8eb9696db517b341d05c60eff2286cafd2895dc4d377665c2
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.865585 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.868120 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-config\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.884646 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.887523 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a6822f49-a368-49db-8f43-63abe760e740-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.905305 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.907609 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.924419 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.928090 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.944747 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.964603 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.985074 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt"
Nov 22 09:05:39 crc kubenswrapper[4693]: I1122 09:05:39.996442 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g7zss\" (UniqueName: \"kubernetes.io/projected/f0177d6a-4233-4759-9f66-facba4d65adf-kube-api-access-g7zss\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.004421 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.008050 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/56f55ada-5668-4a07-888e-1c578214f660-images\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.025419 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046712 4693 configmap.go:193] Couldn't get configMap openshift-console/trusted-ca-bundle: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046726 4693 secret.go:188] Couldn't get secret openshift-console/console-oauth-config: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046764 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.046751923 +0000 UTC m=+137.189254214 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "trusted-ca-bundle" (UniqueName: "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046777 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config podName:8cd5b74f-7a92-4b0f-9846-e9afd22fc091 nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.04677177 +0000 UTC m=+137.189274061 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "console-oauth-config" (UniqueName: "kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config") pod "console-f9d7485db-m5ldq" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046804 4693 configmap.go:193] Couldn't get configMap openshift-console-operator/console-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046831 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config podName:0b941991-050f-44f9-a7e8-eb0b1ae14ade nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.04682505 +0000 UTC m=+137.189327341 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config") pod "console-operator-58897d9998-wg5vx" (UID: "0b941991-050f-44f9-a7e8-eb0b1ae14ade") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046877 4693 secret.go:188] Couldn't get secret openshift-dns-operator/metrics-tls: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046944 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls podName:f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.046930749 +0000 UTC m=+137.189433040 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-tls" (UniqueName: "kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls") pod "dns-operator-744455d44c-dgfk9" (UID: "f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046966 4693 secret.go:188] Couldn't get secret openshift-apiserver-operator/openshift-apiserver-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046968 4693 secret.go:188] Couldn't get secret openshift-console-operator/serving-cert: failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046988 4693 configmap.go:193] Couldn't get configMap openshift-apiserver-operator/openshift-apiserver-operator-config: failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.046992 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert podName:f0177d6a-4233-4759-9f66-facba4d65adf nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.04698437 +0000 UTC m=+137.189486661 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert") pod "openshift-apiserver-operator-796bbdcf4f-k28v7" (UID: "f0177d6a-4233-4759-9f66-facba4d65adf") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.047046 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert podName:0b941991-050f-44f9-a7e8-eb0b1ae14ade nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.04703241 +0000 UTC m=+137.189534701 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert") pod "console-operator-58897d9998-wg5vx" (UID: "0b941991-050f-44f9-a7e8-eb0b1ae14ade") : failed to sync secret cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.047060 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config podName:f0177d6a-4233-4759-9f66-facba4d65adf nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.047054392 +0000 UTC m=+137.189556683 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config") pod "openshift-apiserver-operator-796bbdcf4f-k28v7" (UID: "f0177d6a-4233-4759-9f66-facba4d65adf") : failed to sync configmap cache: timed out waiting for the condition
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.049745 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.064887 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.084777 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.097754 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wtbn\" (UniqueName: \"kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.104453 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.125085 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.144531 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.157039 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hswb2\" (UniqueName: \"kubernetes.io/projected/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-kube-api-access-hswb2\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.164960 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.185093 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.204397 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.216668 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhjnd\" (UniqueName: \"kubernetes.io/projected/d019eb1c-20c3-4662-87b1-2dbd0b92ce1c-kube-api-access-dhjnd\") pod \"openshift-config-operator-7777fb866f-2d96p\" (UID: \"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.224548 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.245107 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.258228 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bng4x\" (UniqueName: \"kubernetes.io/projected/56f55ada-5668-4a07-888e-1c578214f660-kube-api-access-bng4x\") pod \"machine-api-operator-5694c8668f-chjcb\" (UID: \"56f55ada-5668-4a07-888e-1c578214f660\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.265660 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.285012 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.304515 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.325426 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.344884 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.364549 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.376317 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8b4x8\" (UniqueName: \"kubernetes.io/projected/0b941991-050f-44f9-a7e8-eb0b1ae14ade-kube-api-access-8b4x8\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.384857 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.405081 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.409068 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xx78\" (UniqueName: \"kubernetes.io/projected/a6822f49-a368-49db-8f43-63abe760e740-kube-api-access-9xx78\") pod \"openshift-controller-manager-operator-756b6f6bc6-rzchv\" (UID: \"a6822f49-a368-49db-8f43-63abe760e740\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.413387 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.429516 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460179 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460235 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g6xcw\" (UniqueName: \"kubernetes.io/projected/5683ccd7-f245-4a19-aff7-686b294455f9-kube-api-access-g6xcw\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460253 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-serving-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460330 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9kfj\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460346 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-default-certificate\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f6d86049-ce66-4900-bc42-b1ac6864e79a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460387 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a6632e27-c682-4974-8903-36443b11d704-proxy-tls\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460405 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-node-pullsecrets\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460420 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-serving-cert\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460434 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit-dir\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460451 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460476 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-stats-auth\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460489 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460504 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-trusted-ca-bundle\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460518 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460783 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460814 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21df8944-4816-4a75-83e5-0656e313029c-service-ca-bundle\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460831 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z99xf\" (UniqueName: \"kubernetes.io/projected/21df8944-4816-4a75-83e5-0656e313029c-kube-api-access-z99xf\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460899 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69dfl\" (UniqueName: \"kubernetes.io/projected/49503ce5-c224-4967-b968-0480264dca16-kube-api-access-69dfl\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460928 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p7s27\" (UniqueName: \"kubernetes.io/projected/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-kube-api-access-p7s27\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460952 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.460967 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461005 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-config\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461048 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-45j6j\" (UniqueName: \"kubernetes.io/projected/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-kube-api-access-45j6j\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461117 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/be6d2d6c-b568-4b92-915d-20dd0d7f233c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461155 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hfgf9\" (UniqueName: \"kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461176 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461283 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be6d2d6c-b568-4b92-915d-20dd0d7f233c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461336 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-serving-cert\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461353 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5683ccd7-f245-4a19-aff7-686b294455f9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461376 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-serving-cert\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461506 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461526 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n866q\" (UniqueName: \"kubernetes.io/projected/4ee0f119-a41c-44c4-8057-a8dd22d6b5c3-kube-api-access-n866q\") pod \"downloads-7954f5f757-984ks\" (UID: \"4ee0f119-a41c-44c4-8057-a8dd22d6b5c3\") " pod="openshift-console/downloads-7954f5f757-984ks"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461559 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/49503ce5-c224-4967-b968-0480264dca16-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461588 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-srv-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461631 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461655 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79e553de-810b-486a-a739-8d9a5ce8e966-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.461675 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17774a78-d5ad-4d77-8426-ae730dc29981-metrics-tls\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.462362 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-client\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.462582 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-config\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.462882 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-images\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.462988 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463020 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-apiservice-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463041 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-service-ca\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463058 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zp766\" (UniqueName: \"kubernetes.io/projected/14e6c49b-951c-4c4e-bacb-7d360d8bdd59-kube-api-access-zp766\") pod \"migrator-59844c95c7-j7fbj\" (UID: \"14e6c49b-951c-4c4e-bacb-7d360d8bdd59\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463116 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463133 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463149 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-serving-cert\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463163 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463207 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-key\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463225 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79e553de-810b-486a-a739-8d9a5ce8e966-config\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463240 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9xrj5\" (UniqueName: \"kubernetes.io/projected/a6632e27-c682-4974-8903-36443b11d704-kube-api-access-9xrj5\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463255 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drg7v\" (UniqueName: \"kubernetes.io/projected/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-kube-api-access-drg7v\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463269 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463282 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4dztk\" (UniqueName: \"kubernetes.io/projected/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-kube-api-access-4dztk\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"
Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463299 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName:
\"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-encryption-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463331 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-config\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463356 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463384 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7106f2-8e3a-49a1-8645-639690798bf5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463407 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-client\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463423 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmswr\" (UniqueName: \"kubernetes.io/projected/f6d86049-ce66-4900-bc42-b1ac6864e79a-kube-api-access-wmswr\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463437 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be6d2d6c-b568-4b92-915d-20dd0d7f233c-config\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463451 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463467 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6rz7\" (UniqueName: \"kubernetes.io/projected/d1c24056-5be6-4718-9e38-e74af7d815db-kube-api-access-z6rz7\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463701 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/24b3e483-5f23-4878-943e-efe9d09663ca-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463737 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24b3e483-5f23-4878-943e-efe9d09663ca-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463765 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-srv-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463828 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-ca\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463868 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17774a78-d5ad-4d77-8426-ae730dc29981-trusted-ca\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463884 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79e553de-810b-486a-a739-8d9a5ce8e966-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463905 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.463961 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-webhook-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464014 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464079 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7j2hr\" (UniqueName: \"kubernetes.io/projected/f17aff8d-d3bb-461c-88c3-efc5501c99b0-kube-api-access-7j2hr\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464221 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-cabundle\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464252 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464268 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464309 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26lxl\" (UniqueName: \"kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464322 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-tmpfs\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464347 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464385 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464421 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-image-import-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464438 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7106f2-8e3a-49a1-8645-639690798bf5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464452 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464479 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sx9vb\" (UniqueName: \"kubernetes.io/projected/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-kube-api-access-sx9vb\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464492 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464505 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464529 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464568 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frbx6\" (UniqueName: \"kubernetes.io/projected/6875be01-c5cd-437a-a4aa-878ba5dbe400-kube-api-access-frbx6\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464587 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464661 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-service-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464676 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464704 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-metrics-certs\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464718 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xdx4\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-kube-api-access-8xdx4\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464732 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464749 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/49503ce5-c224-4967-b968-0480264dca16-proxy-tls\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464787 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-frxcd\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-kube-api-access-frxcd\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464803 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-profile-collector-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464817 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464832 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464863 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h78lc\" (UniqueName: \"kubernetes.io/projected/6d7106f2-8e3a-49a1-8645-639690798bf5-kube-api-access-h78lc\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.464877 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:40.964867834 +0000 UTC m=+137.107370125 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.464899 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.465422 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lgpqh\" (UniqueName: \"kubernetes.io/projected/6a3a6a12-ab76-4742-91b4-af9ef4b70181-kube-api-access-lgpqh\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.465440 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsv85\" (UniqueName: \"kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.476366 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566338 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566627 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9xrj5\" (UniqueName: \"kubernetes.io/projected/a6632e27-c682-4974-8903-36443b11d704-kube-api-access-9xrj5\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566663 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drg7v\" (UniqueName: \"kubernetes.io/projected/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-kube-api-access-drg7v\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566686 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566707 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566722 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-serving-cert\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566741 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566758 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-key\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 
09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566775 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79e553de-810b-486a-a739-8d9a5ce8e966-config\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566792 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4dztk\" (UniqueName: \"kubernetes.io/projected/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-kube-api-access-4dztk\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566807 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-config\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566829 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-encryption-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566864 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566886 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7106f2-8e3a-49a1-8645-639690798bf5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566904 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566924 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmswr\" (UniqueName: \"kubernetes.io/projected/f6d86049-ce66-4900-bc42-b1ac6864e79a-kube-api-access-wmswr\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566944 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/be6d2d6c-b568-4b92-915d-20dd0d7f233c-config\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566968 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-client\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.566986 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567004 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6rz7\" (UniqueName: \"kubernetes.io/projected/d1c24056-5be6-4718-9e38-e74af7d815db-kube-api-access-z6rz7\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567022 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24b3e483-5f23-4878-943e-efe9d09663ca-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567039 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/24b3e483-5f23-4878-943e-efe9d09663ca-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567062 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnd8g\" (UniqueName: \"kubernetes.io/projected/2317bf17-7606-4b48-b58d-800167e8131f-kube-api-access-rnd8g\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567081 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-srv-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567103 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-ca\") pod 
\"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567120 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17774a78-d5ad-4d77-8426-ae730dc29981-trusted-ca\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567145 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-webhook-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567162 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79e553de-810b-486a-a739-8d9a5ce8e966-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567182 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567209 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567228 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7j2hr\" (UniqueName: \"kubernetes.io/projected/f17aff8d-d3bb-461c-88c3-efc5501c99b0-kube-api-access-7j2hr\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567245 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567264 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-cabundle\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 
09:05:40.567279 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26lxl\" (UniqueName: \"kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567296 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-tmpfs\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567314 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567333 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2317bf17-7606-4b48-b58d-800167e8131f-metrics-tls\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567367 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567386 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567406 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567424 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-image-import-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567442 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/6d7106f2-8e3a-49a1-8645-639690798bf5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567461 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567477 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-socket-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567498 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567518 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567537 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sx9vb\" (UniqueName: \"kubernetes.io/projected/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-kube-api-access-sx9vb\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567552 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567572 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frbx6\" (UniqueName: \"kubernetes.io/projected/6875be01-c5cd-437a-a4aa-878ba5dbe400-kube-api-access-frbx6\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567593 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-node-bootstrap-token\") pod \"machine-config-server-6bbcf\" (UID: 
\"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567620 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-service-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567658 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-metrics-certs\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567681 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xdx4\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-kube-api-access-8xdx4\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567702 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567720 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567741 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-frxcd\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-kube-api-access-frxcd\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567762 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/49503ce5-c224-4967-b968-0480264dca16-proxy-tls\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567783 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-profile-collector-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " 
pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567799 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st4s9\" (UniqueName: \"kubernetes.io/projected/987c49b0-614e-4a5d-9cf7-375e13485e55-kube-api-access-st4s9\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567800 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567819 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567856 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567880 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h78lc\" (UniqueName: \"kubernetes.io/projected/6d7106f2-8e3a-49a1-8645-639690798bf5-kube-api-access-h78lc\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567900 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-registration-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567919 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567944 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lgpqh\" (UniqueName: \"kubernetes.io/projected/6a3a6a12-ab76-4742-91b4-af9ef4b70181-kube-api-access-lgpqh\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567964 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qsv85\" (UniqueName: \"kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.567989 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568020 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-certs\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568049 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9kfj\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568069 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g6xcw\" (UniqueName: \"kubernetes.io/projected/5683ccd7-f245-4a19-aff7-686b294455f9-kube-api-access-g6xcw\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568090 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-serving-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568109 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-default-certificate\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568127 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f6d86049-ce66-4900-bc42-b1ac6864e79a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568149 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: 
\"kubernetes.io/secret/a6632e27-c682-4974-8903-36443b11d704-proxy-tls\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568168 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-node-pullsecrets\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568186 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit-dir\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568203 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-serving-cert\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568218 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568239 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568258 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/987c49b0-614e-4a5d-9cf7-375e13485e55-cert\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568276 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-mountpoint-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568292 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-stats-auth\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 
09:05:40.568312 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568332 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-trusted-ca-bundle\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568353 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z99xf\" (UniqueName: \"kubernetes.io/projected/21df8944-4816-4a75-83e5-0656e313029c-kube-api-access-z99xf\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568371 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69dfl\" (UniqueName: \"kubernetes.io/projected/49503ce5-c224-4967-b968-0480264dca16-kube-api-access-69dfl\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568392 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p7s27\" (UniqueName: \"kubernetes.io/projected/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-kube-api-access-p7s27\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568413 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568429 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21df8944-4816-4a75-83e5-0656e313029c-service-ca-bundle\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568449 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-config\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568464 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568482 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568501 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2317bf17-7606-4b48-b58d-800167e8131f-config-volume\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568522 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-45j6j\" (UniqueName: \"kubernetes.io/projected/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-kube-api-access-45j6j\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568539 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/be6d2d6c-b568-4b92-915d-20dd0d7f233c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568573 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hfgf9\" (UniqueName: \"kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568592 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568611 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be6d2d6c-b568-4b92-915d-20dd0d7f233c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568628 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-serving-cert\") 
pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568667 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568685 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n866q\" (UniqueName: \"kubernetes.io/projected/4ee0f119-a41c-44c4-8057-a8dd22d6b5c3-kube-api-access-n866q\") pod \"downloads-7954f5f757-984ks\" (UID: \"4ee0f119-a41c-44c4-8057-a8dd22d6b5c3\") " pod="openshift-console/downloads-7954f5f757-984ks" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568704 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/5683ccd7-f245-4a19-aff7-686b294455f9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568721 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-serving-cert\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568739 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568757 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79e553de-810b-486a-a739-8d9a5ce8e966-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568772 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/49503ce5-c224-4967-b968-0480264dca16-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568789 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-srv-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 
09:05:40.568808 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17774a78-d5ad-4d77-8426-ae730dc29981-metrics-tls\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568826 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mdmvd\" (UniqueName: \"kubernetes.io/projected/10fe4917-ff80-4a00-be3d-24398ddde1ae-kube-api-access-mdmvd\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568882 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-client\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568898 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-csi-data-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568935 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-config\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568951 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-plugins-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568971 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-images\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.568990 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569006 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zp766\" (UniqueName: \"kubernetes.io/projected/14e6c49b-951c-4c4e-bacb-7d360d8bdd59-kube-api-access-zp766\") pod 
\"migrator-59844c95c7-j7fbj\" (UID: \"14e6c49b-951c-4c4e-bacb-7d360d8bdd59\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569025 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c8cfb\" (UniqueName: \"kubernetes.io/projected/b99886aa-5100-4f05-a0c0-e437573bdd44-kube-api-access-c8cfb\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569045 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-apiservice-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569065 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-service-ca\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569092 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.569837 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.570880 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/79e553de-810b-486a-a739-8d9a5ce8e966-config\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.572330 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.574131 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-cabundle\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 
crc kubenswrapper[4693]: E1122 09:05:40.575112 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.075092924 +0000 UTC m=+137.217595215 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.585242 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/49503ce5-c224-4967-b968-0480264dca16-proxy-tls\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.585663 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-trusted-ca-bundle\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.586054 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.586475 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-webhook-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.586539 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.589885 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-encryption-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.590308 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-config\") pod 
\"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.590603 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/17774a78-d5ad-4d77-8426-ae730dc29981-trusted-ca\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.593006 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-image-import-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.594781 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-node-pullsecrets\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.595027 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6d7106f2-8e3a-49a1-8645-639690798bf5-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.595159 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/f6d86049-ce66-4900-bc42-b1ac6864e79a-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.595573 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-serving-cert\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596240 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596343 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc 
kubenswrapper[4693]: I1122 09:05:40.596341 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596651 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-profile-collector-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596867 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596928 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-config\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.596983 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-service-ca\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.597459 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-service-ca-bundle\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.597474 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/21df8944-4816-4a75-83e5-0656e313029c-service-ca-bundle\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.599063 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.600107 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/49503ce5-c224-4967-b968-0480264dca16-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.602315 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-config\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.602424 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.602798 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.602860 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6d7106f2-8e3a-49a1-8645-639690798bf5-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.603103 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-serving-cert\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.603177 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be6d2d6c-b568-4b92-915d-20dd0d7f233c-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.603291 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.603534 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-tmpfs\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.603777 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.605105 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-stats-auth\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.605941 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-config\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.606493 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-serving-cert\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.607308 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.607619 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.607730 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-profile-collector-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.607729 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.608462 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: 
\"kubernetes.io/secret/5683ccd7-f245-4a19-aff7-686b294455f9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.608491 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/24b3e483-5f23-4878-943e-efe9d09663ca-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.608709 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/79e553de-810b-486a-a739-8d9a5ce8e966-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.608899 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit-dir\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.610271 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.610417 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.611099 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.611208 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.611216 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-ca\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 
09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.611516 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-serving-cert\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.612635 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-audit\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.612986 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6875be01-c5cd-437a-a4aa-878ba5dbe400-etcd-client\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.613668 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be6d2d6c-b568-4b92-915d-20dd0d7f233c-config\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.613793 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-serving-ca\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.614364 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.614509 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.614516 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/24b3e483-5f23-4878-943e-efe9d09663ca-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.615467 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"] Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.615609 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-apiservice-cert\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.615806 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-auth-proxy-config\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.616567 4693 generic.go:334] "Generic (PLEG): container finished" podID="c428410b-f6ac-47d2-aae7-76f314f718db" containerID="48d4d93208bdf1afbb7e3f824c3375dce34b1f9277906e3bf0adaabca639e916" exitCode=0 Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.616612 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" event={"ID":"c428410b-f6ac-47d2-aae7-76f314f718db","Type":"ContainerDied","Data":"48d4d93208bdf1afbb7e3f824c3375dce34b1f9277906e3bf0adaabca639e916"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.618057 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-default-certificate\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.621432 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-srv-cert\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.621529 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6a3a6a12-ab76-4742-91b4-af9ef4b70181-etcd-client\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.621748 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/a6632e27-c682-4974-8903-36443b11d704-images\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.621833 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622008 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622129 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/f17aff8d-d3bb-461c-88c3-efc5501c99b0-signing-key\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622228 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" event={"ID":"4385a1e7-41f2-423b-8a29-749bbb181512","Type":"ContainerStarted","Data":"8c8ffd9c26bc6ed078670189e926af35beda0b4a3246896a42e77f66b95a335b"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622307 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" event={"ID":"4385a1e7-41f2-423b-8a29-749bbb181512","Type":"ContainerStarted","Data":"cf23f9a90d59b7e970e8598a575ce19d76279040608cf66fbfe1de2f28060308"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622446 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" event={"ID":"4385a1e7-41f2-423b-8a29-749bbb181512","Type":"ContainerStarted","Data":"22feef1155ef8fc8eb9696db517b341d05c60eff2286cafd2895dc4d377665c2"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622417 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9xrj5\" (UniqueName: \"kubernetes.io/projected/a6632e27-c682-4974-8903-36443b11d704-kube-api-access-9xrj5\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622534 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.622983 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d1c24056-5be6-4718-9e38-e74af7d815db-srv-cert\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.624756 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.624947 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drg7v\" (UniqueName: \"kubernetes.io/projected/77b3a304-fd0f-45a2-b0df-a9adf4a9e83c-kube-api-access-drg7v\") pod \"authentication-operator-69f744f599-5t5mq\" (UID: \"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c\") " 
pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.628764 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/17774a78-d5ad-4d77-8426-ae730dc29981-metrics-tls\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: W1122 09:05:40.629810 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd019eb1c_20c3_4662_87b1_2dbd0b92ce1c.slice/crio-ce97a36f2bfc24a70a544de9fc4ce7585177689010a1036164d55b9a1034690d WatchSource:0}: Error finding container ce97a36f2bfc24a70a544de9fc4ce7585177689010a1036164d55b9a1034690d: Status 404 returned error can't find the container with id ce97a36f2bfc24a70a544de9fc4ce7585177689010a1036164d55b9a1034690d Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.630933 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" event={"ID":"db30dd05-0c41-415a-aa26-e78c9ebae1bc","Type":"ContainerStarted","Data":"f85193c40caa60a4fd8e8d58100a33d4fa94bfe9f29c553a0b4adfc49e0fbc66"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.630974 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" event={"ID":"db30dd05-0c41-415a-aa26-e78c9ebae1bc","Type":"ContainerStarted","Data":"d0c3a1a4f15de9f5b6dacc89e84211d70f6c9b512a7adbc0c90a4e5224db486a"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.630984 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" event={"ID":"db30dd05-0c41-415a-aa26-e78c9ebae1bc","Type":"ContainerStarted","Data":"73a8cdc1d7d31683f45108c95cf0effb4b880e992cbfccd39e467bf80ec78e94"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.631560 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/21df8944-4816-4a75-83e5-0656e313029c-metrics-certs\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.639692 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/a6632e27-c682-4974-8903-36443b11d704-proxy-tls\") pod \"machine-config-operator-74547568cd-mz7f4\" (UID: \"a6632e27-c682-4974-8903-36443b11d704\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.645411 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" event={"ID":"24790c77-ff73-4528-a742-ab163edd8ac8","Type":"ContainerStarted","Data":"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.645445 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" event={"ID":"24790c77-ff73-4528-a742-ab163edd8ac8","Type":"ContainerStarted","Data":"235c5f62adbd87296ff1348b625c28be64a50f40567c7b2c18ada111282b52c1"} Nov 22 
09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.645542 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.646055 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.653608 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-f7n6n\" (UID: \"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.653933 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" event={"ID":"c9649ff6-4471-48d9-b751-56ac85bd9c91","Type":"ContainerStarted","Data":"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.653954 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" event={"ID":"c9649ff6-4471-48d9-b751-56ac85bd9c91","Type":"ContainerStarted","Data":"53a0330b188e3e7654370757e8c67a79dec1dd65fae989d6dae409f943241bd1"} Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.654443 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.662921 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.673858 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674168 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv"] Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674283 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-certs\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674325 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/987c49b0-614e-4a5d-9cf7-375e13485e55-cert\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674344 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-mountpoint-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " 
pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674382 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2317bf17-7606-4b48-b58d-800167e8131f-config-volume\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674452 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mdmvd\" (UniqueName: \"kubernetes.io/projected/10fe4917-ff80-4a00-be3d-24398ddde1ae-kube-api-access-mdmvd\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674489 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-csi-data-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674544 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-plugins-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674563 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c8cfb\" (UniqueName: \"kubernetes.io/projected/b99886aa-5100-4f05-a0c0-e437573bdd44-kube-api-access-c8cfb\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674611 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnd8g\" (UniqueName: \"kubernetes.io/projected/2317bf17-7606-4b48-b58d-800167e8131f-kube-api-access-rnd8g\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674674 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2317bf17-7606-4b48-b58d-800167e8131f-metrics-tls\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674703 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-socket-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674727 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-node-bootstrap-token\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " 
pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674743 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674777 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-st4s9\" (UniqueName: \"kubernetes.io/projected/987c49b0-614e-4a5d-9cf7-375e13485e55-kube-api-access-st4s9\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.674798 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-registration-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.675016 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-registration-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.675614 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-mountpoint-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.676438 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2317bf17-7606-4b48-b58d-800167e8131f-config-volume\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.676930 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-csi-data-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.676950 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-plugins-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.677276 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 09:05:41.177262818 +0000 UTC m=+137.319765109 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.677547 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/10fe4917-ff80-4a00-be3d-24398ddde1ae-socket-dir\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.678589 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/987c49b0-614e-4a5d-9cf7-375e13485e55-cert\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.679373 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-certs\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.680302 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/b99886aa-5100-4f05-a0c0-e437573bdd44-node-bootstrap-token\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.690394 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frxcd\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-kube-api-access-frxcd\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.691722 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2317bf17-7606-4b48-b58d-800167e8131f-metrics-tls\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.697161 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7j2hr\" (UniqueName: \"kubernetes.io/projected/f17aff8d-d3bb-461c-88c3-efc5501c99b0-kube-api-access-7j2hr\") pod \"service-ca-9c57cc56f-c5cns\" (UID: \"f17aff8d-d3bb-461c-88c3-efc5501c99b0\") " pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.697945 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.717572 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-chjcb"] Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.728313 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/79e553de-810b-486a-a739-8d9a5ce8e966-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-jxzb9\" (UID: \"79e553de-810b-486a-a739-8d9a5ce8e966\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.744045 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/17774a78-d5ad-4d77-8426-ae730dc29981-bound-sa-token\") pod \"ingress-operator-5b745b69d9-m84vc\" (UID: \"17774a78-d5ad-4d77-8426-ae730dc29981\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.765169 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p7s27\" (UniqueName: \"kubernetes.io/projected/50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf-kube-api-access-p7s27\") pod \"olm-operator-6b444d44fb-nqqnd\" (UID: \"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.776078 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.777178 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.27716538 +0000 UTC m=+137.419667662 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.786153 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sx9vb\" (UniqueName: \"kubernetes.io/projected/2e3e8bce-5727-4b65-9d04-ebde1f1dc755-kube-api-access-sx9vb\") pod \"packageserver-d55dfcdfc-7j6hm\" (UID: \"2e3e8bce-5727-4b65-9d04-ebde1f1dc755\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.805878 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xdx4\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-kube-api-access-8xdx4\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.824696 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n866q\" (UniqueName: \"kubernetes.io/projected/4ee0f119-a41c-44c4-8057-a8dd22d6b5c3-kube-api-access-n866q\") pod \"downloads-7954f5f757-984ks\" (UID: \"4ee0f119-a41c-44c4-8057-a8dd22d6b5c3\") " pod="openshift-console/downloads-7954f5f757-984ks" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.838261 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h78lc\" (UniqueName: \"kubernetes.io/projected/6d7106f2-8e3a-49a1-8645-639690798bf5-kube-api-access-h78lc\") pod \"kube-storage-version-migrator-operator-b67b599dd-hgv6m\" (UID: \"6d7106f2-8e3a-49a1-8645-639690798bf5\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.861078 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/24b3e483-5f23-4878-943e-efe9d09663ca-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-l4bpn\" (UID: \"24b3e483-5f23-4878-943e-efe9d09663ca\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.875575 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-984ks" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.877558 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.877826 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 09:05:41.37781647 +0000 UTC m=+137.520318760 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.886188 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.886287 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lgpqh\" (UniqueName: \"kubernetes.io/projected/6a3a6a12-ab76-4742-91b4-af9ef4b70181-kube-api-access-lgpqh\") pod \"apiserver-76f77b778f-m97gh\" (UID: \"6a3a6a12-ab76-4742-91b4-af9ef4b70181\") " pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.892952 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.897443 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.913565 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4dztk\" (UniqueName: \"kubernetes.io/projected/bf4475aa-6a09-4f8d-a182-d575ffb0f9f6-kube-api-access-4dztk\") pod \"service-ca-operator-777779d784-fdrlh\" (UID: \"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.924299 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-frbx6\" (UniqueName: \"kubernetes.io/projected/6875be01-c5cd-437a-a4aa-878ba5dbe400-kube-api-access-frbx6\") pod \"etcd-operator-b45778765-rz6dr\" (UID: \"6875be01-c5cd-437a-a4aa-878ba5dbe400\") " pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.939717 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.952390 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69dfl\" (UniqueName: \"kubernetes.io/projected/49503ce5-c224-4967-b968-0480264dca16-kube-api-access-69dfl\") pod \"machine-config-controller-84d6567774-9blmp\" (UID: \"49503ce5-c224-4967-b968-0480264dca16\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.955348 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n"] Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.956682 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.974421 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.978309 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:40 crc kubenswrapper[4693]: E1122 09:05:40.978717 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.478704955 +0000 UTC m=+137.621207246 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.983599 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-45j6j\" (UniqueName: \"kubernetes.io/projected/aa0bee87-fb16-49ea-9680-1ab3ebc1caed-kube-api-access-45j6j\") pod \"cluster-samples-operator-665b6dd947-9fflj\" (UID: \"aa0bee87-fb16-49ea-9680-1ab3ebc1caed\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" Nov 22 09:05:40 crc kubenswrapper[4693]: I1122 09:05:40.991563 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.001236 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.009630 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.009870 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9kfj\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.027153 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hfgf9\" (UniqueName: \"kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9\") pod \"marketplace-operator-79b997595-7skdb\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.041320 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6rz7\" (UniqueName: \"kubernetes.io/projected/d1c24056-5be6-4718-9e38-e74af7d815db-kube-api-access-z6rz7\") pod \"catalog-operator-68c6474976-fkplq\" (UID: \"d1c24056-5be6-4718-9e38-e74af7d815db\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.076215 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsv85\" (UniqueName: \"kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85\") pod \"oauth-openshift-558db77b4-6s452\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.081990 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082036 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082054 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082118 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082133 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082149 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082192 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.082219 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.085051 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0177d6a-4233-4759-9f66-facba4d65adf-config\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.085233 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c-metrics-tls\") pod \"dns-operator-744455d44c-dgfk9\" (UID: \"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c\") " pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.085479 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.085707 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.585697618 +0000 UTC m=+137.728199909 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.086361 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"console-f9d7485db-m5ldq\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.086459 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/be6d2d6c-b568-4b92-915d-20dd0d7f233c-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-bf9vt\" (UID: \"be6d2d6c-b568-4b92-915d-20dd0d7f233c\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.086504 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0b941991-050f-44f9-a7e8-eb0b1ae14ade-config\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.091129 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f0177d6a-4233-4759-9f66-facba4d65adf-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-k28v7\" (UID: \"f0177d6a-4233-4759-9f66-facba4d65adf\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.096382 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b941991-050f-44f9-a7e8-eb0b1ae14ade-serving-cert\") pod \"console-operator-58897d9998-wg5vx\" (UID: \"0b941991-050f-44f9-a7e8-eb0b1ae14ade\") " pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.097665 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-5t5mq"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.110087 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z99xf\" (UniqueName: \"kubernetes.io/projected/21df8944-4816-4a75-83e5-0656e313029c-kube-api-access-z99xf\") pod \"router-default-5444994796-2z7n2\" (UID: \"21df8944-4816-4a75-83e5-0656e313029c\") " pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.119459 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zp766\" (UniqueName: \"kubernetes.io/projected/14e6c49b-951c-4c4e-bacb-7d360d8bdd59-kube-api-access-zp766\") pod \"migrator-59844c95c7-j7fbj\" (UID: \"14e6c49b-951c-4c4e-bacb-7d360d8bdd59\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" Nov 22 
09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.137902 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.145977 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-26lxl\" (UniqueName: \"kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl\") pod \"collect-profiles-29396700-nqzbf\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.160998 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmswr\" (UniqueName: \"kubernetes.io/projected/f6d86049-ce66-4900-bc42-b1ac6864e79a-kube-api-access-wmswr\") pod \"control-plane-machine-set-operator-78cbb6b69f-gqsrq\" (UID: \"f6d86049-ce66-4900-bc42-b1ac6864e79a\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.168317 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.183317 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.183588 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.683572378 +0000 UTC m=+137.826074668 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.183709 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.183383 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.184073 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.68406361 +0000 UTC m=+137.826565901 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.187067 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g6xcw\" (UniqueName: \"kubernetes.io/projected/5683ccd7-f245-4a19-aff7-686b294455f9-kube-api-access-g6xcw\") pod \"multus-admission-controller-857f4d67dd-sthb9\" (UID: \"5683ccd7-f245-4a19-aff7-686b294455f9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.202791 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.208383 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.213365 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.219487 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.221375 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mdmvd\" (UniqueName: \"kubernetes.io/projected/10fe4917-ff80-4a00-be3d-24398ddde1ae-kube-api-access-mdmvd\") pod \"csi-hostpathplugin-wwrvj\" (UID: \"10fe4917-ff80-4a00-be3d-24398ddde1ae\") " pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.228173 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.231171 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.251947 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.268364 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.274735 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.281212 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnd8g\" (UniqueName: \"kubernetes.io/projected/2317bf17-7606-4b48-b58d-800167e8131f-kube-api-access-rnd8g\") pod \"dns-default-rv5vl\" (UID: \"2317bf17-7606-4b48-b58d-800167e8131f\") " pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.281446 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st4s9\" (UniqueName: \"kubernetes.io/projected/987c49b0-614e-4a5d-9cf7-375e13485e55-kube-api-access-st4s9\") pod \"ingress-canary-f94z5\" (UID: \"987c49b0-614e-4a5d-9cf7-375e13485e55\") " pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.282024 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c8cfb\" (UniqueName: \"kubernetes.io/projected/b99886aa-5100-4f05-a0c0-e437573bdd44-kube-api-access-c8cfb\") pod \"machine-config-server-6bbcf\" (UID: \"b99886aa-5100-4f05-a0c0-e437573bdd44\") " pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.285142 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.285561 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.785548768 +0000 UTC m=+137.928051060 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.288205 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.304497 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.319109 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-m97gh"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.320266 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-f94z5" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.320528 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.320954 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-6bbcf" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.340815 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-984ks"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.344462 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.344467 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.358745 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-rv5vl" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.358871 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:05:41 crc kubenswrapper[4693]: W1122 09:05:41.363883 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6a3a6a12_ab76_4742_91b4_af9ef4b70181.slice/crio-606800f5d4a7dcc719bc5f0eef527bc0f027ce2570a34e5f1ceddb296f604e15 WatchSource:0}: Error finding container 606800f5d4a7dcc719bc5f0eef527bc0f027ce2570a34e5f1ceddb296f604e15: Status 404 returned error can't find the container with id 606800f5d4a7dcc719bc5f0eef527bc0f027ce2570a34e5f1ceddb296f604e15 Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.371468 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" Nov 22 09:05:41 crc kubenswrapper[4693]: W1122 09:05:41.373938 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda6632e27_c682_4974_8903_36443b11d704.slice/crio-2ba2fb6784e34732e8d2bda710d122bafd76b2aea0ed3e5c0fa7d02669e49f33 WatchSource:0}: Error finding container 2ba2fb6784e34732e8d2bda710d122bafd76b2aea0ed3e5c0fa7d02669e49f33: Status 404 returned error can't find the container with id 2ba2fb6784e34732e8d2bda710d122bafd76b2aea0ed3e5c0fa7d02669e49f33 Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.386277 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.386742 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.886731428 +0000 UTC m=+138.029233718 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.469923 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.487531 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.487936 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.987917131 +0000 UTC m=+138.130419422 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.488548 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.488871 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:41.988860344 +0000 UTC m=+138.131362636 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.530591 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.591181 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.591279 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.091244753 +0000 UTC m=+138.233747044 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.591349 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.591908 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.09189792 +0000 UTC m=+138.234400211 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.663529 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" event={"ID":"a6632e27-c682-4974-8903-36443b11d704","Type":"ContainerStarted","Data":"2ba2fb6784e34732e8d2bda710d122bafd76b2aea0ed3e5c0fa7d02669e49f33"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.667647 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2z7n2" event={"ID":"21df8944-4816-4a75-83e5-0656e313029c","Type":"ContainerStarted","Data":"88295b0c85df49b44516d0fed7f988fa643777e1b9cdab2fd0b565e4f516a8bb"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.669984 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" event={"ID":"6a3a6a12-ab76-4742-91b4-af9ef4b70181","Type":"ContainerStarted","Data":"606800f5d4a7dcc719bc5f0eef527bc0f027ce2570a34e5f1ceddb296f604e15"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.675820 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" event={"ID":"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8","Type":"ContainerStarted","Data":"fd669acf54709a3a80778be13e1c6c58c59ce2382ade850492b0149a33438e35"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.675866 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" event={"ID":"8fa803f2-c3af-4e7d-a389-fc1e56fa9fa8","Type":"ContainerStarted","Data":"6838ec26f365af04f8c521e490f51e29dc5b02f260cbecb111875861a8976fbc"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.678328 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" event={"ID":"a6822f49-a368-49db-8f43-63abe760e740","Type":"ContainerStarted","Data":"0ee3b7b66f26f255543b95255004e2651534592cdf73bb747f9a6406cc82a6d9"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.678361 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" event={"ID":"a6822f49-a368-49db-8f43-63abe760e740","Type":"ContainerStarted","Data":"16e15bb062662c8e807cec2ec5678c50fc2e5cfe918d8372acd955f58da7855f"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.682451 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" event={"ID":"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c","Type":"ContainerStarted","Data":"822e382e047e48e9e60f3b695c819397b9124a4b831c5538c411735bf683bb1f"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.685762 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-984ks" 
event={"ID":"4ee0f119-a41c-44c4-8057-a8dd22d6b5c3","Type":"ContainerStarted","Data":"40eaeaff5872ef177376ccf91559331891d7d1fa0f3f6aaa97204a39ff7f3ef0"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.690777 4693 generic.go:334] "Generic (PLEG): container finished" podID="d019eb1c-20c3-4662-87b1-2dbd0b92ce1c" containerID="c6dd8d2afa71e1e33b335cbc9b1c31d278941e795f61716ccc4c8bf06d6d16e3" exitCode=0 Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.690799 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" event={"ID":"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c","Type":"ContainerDied","Data":"c6dd8d2afa71e1e33b335cbc9b1c31d278941e795f61716ccc4c8bf06d6d16e3"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.690821 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" event={"ID":"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c","Type":"ContainerStarted","Data":"ce97a36f2bfc24a70a544de9fc4ce7585177689010a1036164d55b9a1034690d"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.692089 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.692514 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.192499407 +0000 UTC m=+138.335001698 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.697882 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" event={"ID":"c428410b-f6ac-47d2-aae7-76f314f718db","Type":"ContainerStarted","Data":"15414ccfcf612dfca42d00363b81f782f329a8a64e92ea6913456282628266ad"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.703827 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" event={"ID":"56f55ada-5668-4a07-888e-1c578214f660","Type":"ContainerStarted","Data":"a416b594855a75115f5891858f29206de7dc300489bd942630d958be9ba911ac"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.703906 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" event={"ID":"56f55ada-5668-4a07-888e-1c578214f660","Type":"ContainerStarted","Data":"de76937e1f903fc7363bed76036b72f868ea663a5667813f19923bfc771f3636"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.703917 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" event={"ID":"56f55ada-5668-4a07-888e-1c578214f660","Type":"ContainerStarted","Data":"84cb6fd92e43c07fb7fa68d585019fdd27c64fbf2a82ad79c9f99ec53bda94c3"} Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.739535 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk" podStartSLOduration=119.739519241 podStartE2EDuration="1m59.739519241s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:41.698296026 +0000 UTC m=+137.840798317" watchObservedRunningTime="2025-11-22 09:05:41.739519241 +0000 UTC m=+137.882021532" Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.800316 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.801056 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.301037935 +0000 UTC m=+138.443540226 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.925228 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.925387 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.425364098 +0000 UTC m=+138.567866388 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.925702 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:41 crc kubenswrapper[4693]: E1122 09:05:41.926032 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.426021924 +0000 UTC m=+138.568524216 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.944335 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.969403 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.979700 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc"] Nov 22 09:05:41 crc kubenswrapper[4693]: I1122 09:05:41.981610 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-c5cns"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.027559 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.028029 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.528015508 +0000 UTC m=+138.670517799 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: W1122 09:05:42.039599 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod50bce5fc_c0a6_4c48_8ba1_c752fcbb9fbf.slice/crio-76746172d1ba6672b355e5fcb5af883794c6fb18ee53e2c9df5bdf067e51b773 WatchSource:0}: Error finding container 76746172d1ba6672b355e5fcb5af883794c6fb18ee53e2c9df5bdf067e51b773: Status 404 returned error can't find the container with id 76746172d1ba6672b355e5fcb5af883794c6fb18ee53e2c9df5bdf067e51b773 Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.129370 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.130011 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.629992921 +0000 UTC m=+138.772495213 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.152673 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-5m8d6" podStartSLOduration=121.152648146 podStartE2EDuration="2m1.152648146s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.136447737 +0000 UTC m=+138.278950028" watchObservedRunningTime="2025-11-22 09:05:42.152648146 +0000 UTC m=+138.295150437" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.182830 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" podStartSLOduration=120.182814814 podStartE2EDuration="2m0.182814814s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.181245223 +0000 UTC m=+138.323747514" watchObservedRunningTime="2025-11-22 09:05:42.182814814 +0000 UTC m=+138.325317105" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.231304 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.231712 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.731696789 +0000 UTC m=+138.874199080 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.314703 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.332326 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.332612 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.832601506 +0000 UTC m=+138.975103797 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.433192 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.433756 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:42.933742626 +0000 UTC m=+139.076244917 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.536496 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.537044 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.037033188 +0000 UTC m=+139.179535479 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.637353 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.637549 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.137535597 +0000 UTC m=+139.280037889 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.637890 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.638333 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.138325593 +0000 UTC m=+139.280827884 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.698543 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.723032 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.728714 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.733177 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.743659 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.744227 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.244210263 +0000 UTC m=+139.386712554 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.746616 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.778072 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.788732 4693 generic.go:334] "Generic (PLEG): container finished" podID="6a3a6a12-ab76-4742-91b4-af9ef4b70181" containerID="f7bc697d54776c3230eaf62269c1ab6f328dc59d0f11734216e3212329af6855" exitCode=0 Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.788905 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" event={"ID":"6a3a6a12-ab76-4742-91b4-af9ef4b70181","Type":"ContainerDied","Data":"f7bc697d54776c3230eaf62269c1ab6f328dc59d0f11734216e3212329af6855"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.795383 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" event={"ID":"d019eb1c-20c3-4662-87b1-2dbd0b92ce1c","Type":"ContainerStarted","Data":"8627ef43552bb1fcbfd0c4847d69501420819cffd5cca8542402eb5626f837df"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.795789 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.796068 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.798687 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" event={"ID":"17774a78-d5ad-4d77-8426-ae730dc29981","Type":"ContainerStarted","Data":"386f067e1e6f8e7fcd4a3cf820195ca59de9fc5abb119a2d80417b85ac0f62b4"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.798738 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" event={"ID":"17774a78-d5ad-4d77-8426-ae730dc29981","Type":"ContainerStarted","Data":"cbe72fd053a9b989fdd82ce809ef5cf9d2a7e235e284214a4558b0019c72a397"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.821952 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wwrvj"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.832972 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" event={"ID":"6d7106f2-8e3a-49a1-8645-639690798bf5","Type":"ContainerStarted","Data":"bd88f1fe31f9251d8cf768f63f7792269292798b378f9c171b28f2fd7ea30e80"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.833008 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" event={"ID":"6d7106f2-8e3a-49a1-8645-639690798bf5","Type":"ContainerStarted","Data":"ff53fcb77f8756739ac078a34810f9f2460503d60dd897f227d96099dbe42240"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.837368 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" podStartSLOduration=120.837349498 podStartE2EDuration="2m0.837349498s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.83500534 +0000 UTC m=+138.977507632" watchObservedRunningTime="2025-11-22 09:05:42.837349498 +0000 UTC m=+138.979851789" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.846092 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" event={"ID":"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf","Type":"ContainerStarted","Data":"a237576ea7f159c07e05de497e20b831c6dd62a5c02f772ec8abe474bcc69488"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.846131 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" event={"ID":"50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf","Type":"ContainerStarted","Data":"76746172d1ba6672b355e5fcb5af883794c6fb18ee53e2c9df5bdf067e51b773"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.846626 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.848326 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.850349 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.350335253 +0000 UTC m=+139.492837545 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.850693 4693 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-nqqnd container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.850733 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" podUID="50bce5fc-c0a6-4c48-8ba1-c752fcbb9fbf" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.875258 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" event={"ID":"a6632e27-c682-4974-8903-36443b11d704","Type":"ContainerStarted","Data":"3d0a2d902f3c2a18ef871a7dab8d1bbe6e81725cde8bdb8738f43339eac4d4fd"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.875304 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" event={"ID":"a6632e27-c682-4974-8903-36443b11d704","Type":"ContainerStarted","Data":"f47b40f2b7d90732211a86c6cf164600dba7c7272f2474a742c92f33c29d4730"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.877308 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-2z7n2" event={"ID":"21df8944-4816-4a75-83e5-0656e313029c","Type":"ContainerStarted","Data":"76f0fc66a2e203293d11eb7d3bfca5ac2f99d29dbf7a53d690bd12fb6f866aa0"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.886106 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-984ks" event={"ID":"4ee0f119-a41c-44c4-8057-a8dd22d6b5c3","Type":"ContainerStarted","Data":"3c4fdde351f15db79fc8a90d98ff839210f6c9818080b152be01bdcba49a738f"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.886372 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-984ks" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.891946 4693 patch_prober.go:28] interesting pod/downloads-7954f5f757-984ks container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.891984 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-984ks" podUID="4ee0f119-a41c-44c4-8057-a8dd22d6b5c3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.900591 4693 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" event={"ID":"79e553de-810b-486a-a739-8d9a5ce8e966","Type":"ContainerStarted","Data":"d571e92ae21f6b302fdc3e51668dc2a1dee3de192a43bb3ad5f4832e5c6a9af4"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.904643 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-chjcb" podStartSLOduration=120.904624094 podStartE2EDuration="2m0.904624094s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.866804568 +0000 UTC m=+139.009306859" watchObservedRunningTime="2025-11-22 09:05:42.904624094 +0000 UTC m=+139.047126385" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.905276 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.913733 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p" podStartSLOduration=121.913717992 podStartE2EDuration="2m1.913717992s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.910661005 +0000 UTC m=+139.053163296" watchObservedRunningTime="2025-11-22 09:05:42.913717992 +0000 UTC m=+139.056220283" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.917086 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" event={"ID":"f17aff8d-d3bb-461c-88c3-efc5501c99b0","Type":"ContainerStarted","Data":"a3bc892e93de5db851c6dba777f0a487bccb5d27d683de9543ef09c634b2a224"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.917108 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" event={"ID":"f17aff8d-d3bb-461c-88c3-efc5501c99b0","Type":"ContainerStarted","Data":"393155aabcefccba9a48927e01e1310607c68c00ab96ee77a58eeb2787dfc1fa"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.927430 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-wg5vx"] Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.929898 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" event={"ID":"2e3e8bce-5727-4b65-9d04-ebde1f1dc755","Type":"ContainerStarted","Data":"4e0557107edb150c1218f77a71329106997eeb8cf1752dad4cdd3357475c2d77"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.929926 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" event={"ID":"2e3e8bce-5727-4b65-9d04-ebde1f1dc755","Type":"ContainerStarted","Data":"fa2d6c2018ff7b42ffdf5a468c71d259f13e596a6875cafe8b07aebdff0634c4"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.933349 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.951995 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.952337 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.452320932 +0000 UTC m=+139.594823223 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.952591 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:42 crc kubenswrapper[4693]: E1122 09:05:42.954558 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.454544923 +0000 UTC m=+139.597047215 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.958528 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" event={"ID":"77b3a304-fd0f-45a2-b0df-a9adf4a9e83c","Type":"ContainerStarted","Data":"fd229f9da48c23aa05a4d5cfd340ea0838011eb596f2a47e6a85de21e30d8722"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.978680 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-rzchv" podStartSLOduration=120.978666135 podStartE2EDuration="2m0.978666135s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:42.937346638 +0000 UTC m=+139.079848929" watchObservedRunningTime="2025-11-22 09:05:42.978666135 +0000 UTC m=+139.121168426" Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.981219 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6bbcf" event={"ID":"b99886aa-5100-4f05-a0c0-e437573bdd44","Type":"ContainerStarted","Data":"1c4656484d4eea340537e713bca6cf680aa5349e81089b2cc4c425ed7471b273"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.981252 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-6bbcf" event={"ID":"b99886aa-5100-4f05-a0c0-e437573bdd44","Type":"ContainerStarted","Data":"3821d842984074aebbc3f2f98c41134d2bd184663eb9075561960b4a0528be49"} Nov 22 09:05:42 crc kubenswrapper[4693]: I1122 09:05:42.985827 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-rv5vl"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:42.999387 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.005664 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.009259 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-sthb9"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.010325 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-hgv6m" podStartSLOduration=121.010316232 podStartE2EDuration="2m1.010316232s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.003742362 +0000 UTC m=+139.146244653" watchObservedRunningTime="2025-11-22 09:05:43.010316232 +0000 UTC m=+139.152818523" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.014159 4693 kubelet.go:2428] "SyncLoop 
UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.018229 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.018266 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.018276 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-dgfk9"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.022570 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-rz6dr"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.029918 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" podStartSLOduration=121.029906013 podStartE2EDuration="2m1.029906013s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.029257334 +0000 UTC m=+139.171759625" watchObservedRunningTime="2025-11-22 09:05:43.029906013 +0000 UTC m=+139.172408305" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.037162 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"] Nov 22 09:05:43 crc kubenswrapper[4693]: W1122 09:05:43.041692 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8cd5b74f_7a92_4b0f_9846_e9afd22fc091.slice/crio-3bda24df55ea83b4a785b18dbda933d1af9095609ba6bcc04a364d5ad4ab1cdb WatchSource:0}: Error finding container 3bda24df55ea83b4a785b18dbda933d1af9095609ba6bcc04a364d5ad4ab1cdb: Status 404 returned error can't find the container with id 3bda24df55ea83b4a785b18dbda933d1af9095609ba6bcc04a364d5ad4ab1cdb Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.054758 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-f94z5"] Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.055916 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.057918 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.55790173 +0000 UTC m=+139.700404011 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.063591 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-f7n6n" podStartSLOduration=121.063574725 podStartE2EDuration="2m1.063574725s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.056179351 +0000 UTC m=+139.198681642" watchObservedRunningTime="2025-11-22 09:05:43.063574725 +0000 UTC m=+139.206077016" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.074306 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.086814 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.586785806 +0000 UTC m=+139.729288097 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.160965 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg" podStartSLOduration=121.160942933 podStartE2EDuration="2m1.160942933s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.111702774 +0000 UTC m=+139.254205065" watchObservedRunningTime="2025-11-22 09:05:43.160942933 +0000 UTC m=+139.303445223" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.162424 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-5t5mq" podStartSLOduration=121.162418347 podStartE2EDuration="2m1.162418347s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.160349357 +0000 UTC m=+139.302851648" watchObservedRunningTime="2025-11-22 09:05:43.162418347 +0000 UTC m=+139.304920637" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.176723 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.177046 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.677032483 +0000 UTC m=+139.819534774 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.215596 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-c5cns" podStartSLOduration=121.215576462 podStartE2EDuration="2m1.215576462s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.214574638 +0000 UTC m=+139.357076929" watchObservedRunningTime="2025-11-22 09:05:43.215576462 +0000 UTC m=+139.358078753" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.245333 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.277912 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-2z7n2" Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.278243 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.278536 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.778524794 +0000 UTC m=+139.921027085 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.285108 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:43 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:43 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:43 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.285146 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.295961 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-2z7n2" podStartSLOduration=121.295945057 podStartE2EDuration="2m1.295945057s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.266946996 +0000 UTC m=+139.409449287" watchObservedRunningTime="2025-11-22 09:05:43.295945057 +0000 UTC m=+139.438447348"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.297774 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-984ks" podStartSLOduration=121.29776552 podStartE2EDuration="2m1.29776552s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.296082846 +0000 UTC m=+139.438585137" watchObservedRunningTime="2025-11-22 09:05:43.29776552 +0000 UTC m=+139.440267811"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.379366 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.379887 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.879873375 +0000 UTC m=+140.022375667 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.386635 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-6bbcf" podStartSLOduration=6.386618537 podStartE2EDuration="6.386618537s" podCreationTimestamp="2025-11-22 09:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.341465512 +0000 UTC m=+139.483967804" watchObservedRunningTime="2025-11-22 09:05:43.386618537 +0000 UTC m=+139.529120828"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.387473 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-mz7f4" podStartSLOduration=121.387470068 podStartE2EDuration="2m1.387470068s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.382319214 +0000 UTC m=+139.524821505" watchObservedRunningTime="2025-11-22 09:05:43.387470068 +0000 UTC m=+139.529972359"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.464310 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7j6hm" podStartSLOduration=121.464295692 podStartE2EDuration="2m1.464295692s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.420863532 +0000 UTC m=+139.563365824" watchObservedRunningTime="2025-11-22 09:05:43.464295692 +0000 UTC m=+139.606797983"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.464389 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd" podStartSLOduration=121.464385531 podStartE2EDuration="2m1.464385531s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.461604001 +0000 UTC m=+139.604106292" watchObservedRunningTime="2025-11-22 09:05:43.464385531 +0000 UTC m=+139.606887821"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.480598 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.481028 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:43.981018904 +0000 UTC m=+140.123521195 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.510902 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" podStartSLOduration=121.510891128 podStartE2EDuration="2m1.510891128s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:43.509145887 +0000 UTC m=+139.651648177" watchObservedRunningTime="2025-11-22 09:05:43.510891128 +0000 UTC m=+139.653393419"
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.581589 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.581975 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.081962834 +0000 UTC m=+140.224465125 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.685300 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.685774 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.185765228 +0000 UTC m=+140.328267519 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.786503 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.786872 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.286858468 +0000 UTC m=+140.429360759 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.888090 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.888330 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.388321354 +0000 UTC m=+140.530823644 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.988795 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.988985 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.488956624 +0000 UTC m=+140.631458915 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.989117 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:43 crc kubenswrapper[4693]: E1122 09:05:43.989360 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.489346617 +0000 UTC m=+140.631848908 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.993730 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" event={"ID":"14e6c49b-951c-4c4e-bacb-7d360d8bdd59","Type":"ContainerStarted","Data":"e93852507c36a61c311df9934fd57079d99ccf30b804bd274c1613070484849d"}
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.993758 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" event={"ID":"14e6c49b-951c-4c4e-bacb-7d360d8bdd59","Type":"ContainerStarted","Data":"6d0ec84e5118b45319df9063d0909a3f305f600eb5da3cf5e1c1b669f90fc452"}
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.996057 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" event={"ID":"38a5280f-5933-40af-9c61-41f4766fc538","Type":"ContainerStarted","Data":"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"}
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.996088 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" event={"ID":"38a5280f-5933-40af-9c61-41f4766fc538","Type":"ContainerStarted","Data":"5a4edb9ba9d37f1513ec801ff928016f1556803424b28a9c4ad9e02daf6ad81a"}
Nov 22 09:05:43 crc kubenswrapper[4693]: I1122 09:05:43.997220 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.000800 4693 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-6s452 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused" start-of-body=
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.000830 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" podUID="38a5280f-5933-40af-9c61-41f4766fc538" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.15:6443/healthz\": dial tcp 10.217.0.15:6443: connect: connection refused"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.002347 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f94z5" event={"ID":"987c49b0-614e-4a5d-9cf7-375e13485e55","Type":"ContainerStarted","Data":"ee954094c81fe9b1c90ce4aa7bd4b30408472631f7edb64298a1954bd60a276b"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.002373 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-f94z5" event={"ID":"987c49b0-614e-4a5d-9cf7-375e13485e55","Type":"ContainerStarted","Data":"dbcc84c4b7bcc446c1e24b01e2c35573c9db57c1498b87842364208f841e9778"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.008677 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" event={"ID":"f6d86049-ce66-4900-bc42-b1ac6864e79a","Type":"ContainerStarted","Data":"7f4670ae3c7c7e2a244ef45d3963faee961162658524a15031b41cc75630b08a"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.008696 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" event={"ID":"f6d86049-ce66-4900-bc42-b1ac6864e79a","Type":"ContainerStarted","Data":"d94be8fbd39ddafe3c2b3bd8a44c052074a0c1470c972e176e0e905b0d4ef3f5"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.013173 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5ldq" event={"ID":"8cd5b74f-7a92-4b0f-9846-e9afd22fc091","Type":"ContainerStarted","Data":"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.013209 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5ldq" event={"ID":"8cd5b74f-7a92-4b0f-9846-e9afd22fc091","Type":"ContainerStarted","Data":"3bda24df55ea83b4a785b18dbda933d1af9095609ba6bcc04a364d5ad4ab1cdb"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.019615 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" podStartSLOduration=122.019605108 podStartE2EDuration="2m2.019605108s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.019130375 +0000 UTC m=+140.161632665" watchObservedRunningTime="2025-11-22 09:05:44.019605108 +0000 UTC m=+140.162107398"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.049407 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-m5ldq" podStartSLOduration=123.049390919 podStartE2EDuration="2m3.049390919s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.04717359 +0000 UTC m=+140.189675881" watchObservedRunningTime="2025-11-22 09:05:44.049390919 +0000 UTC m=+140.191893210"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.059353 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" event={"ID":"6a3a6a12-ab76-4742-91b4-af9ef4b70181","Type":"ContainerStarted","Data":"d3b12b2e84685cbc5d14dff31c3fc45a364178c65a5ddb9d9993a9c9b19af3e1"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.063140 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-gqsrq" podStartSLOduration=122.063125993 podStartE2EDuration="2m2.063125993s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.062468526 +0000 UTC m=+140.204970817" watchObservedRunningTime="2025-11-22 09:05:44.063125993 +0000 UTC m=+140.205628284"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.094633 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.094981 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.594960236 +0000 UTC m=+140.737462528 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.095314 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.099116 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.599097575 +0000 UTC m=+140.741599867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.100269 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-m84vc" event={"ID":"17774a78-d5ad-4d77-8426-ae730dc29981","Type":"ContainerStarted","Data":"1d94b2be1a4452ff3c33d4213fd086784b02926a0fc01763b88b2547d8a207c6"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.102505 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-f94z5" podStartSLOduration=7.102491165 podStartE2EDuration="7.102491165s" podCreationTimestamp="2025-11-22 09:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.100565074 +0000 UTC m=+140.243067365" watchObservedRunningTime="2025-11-22 09:05:44.102491165 +0000 UTC m=+140.244993457"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.137566 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" event={"ID":"6ba8530d-18bd-4021-8187-6c716bc87a32","Type":"ContainerStarted","Data":"dfc26093727b8f92d963a229244b4cefce8f57b35244fe087262c5ad611f4027"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.137627 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" event={"ID":"6ba8530d-18bd-4021-8187-6c716bc87a32","Type":"ContainerStarted","Data":"c630a55feb1022a1449fe70adc50e9f15050aee2b951f03e34715b13f4c015c6"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.138610 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.146707 4693 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-7skdb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body=
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.146759 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.198729 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.199068 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.699038178 +0000 UTC m=+140.841540469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.205747 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" podStartSLOduration=122.205731162 podStartE2EDuration="2m2.205731162s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.167457442 +0000 UTC m=+140.309959733" watchObservedRunningTime="2025-11-22 09:05:44.205731162 +0000 UTC m=+140.348233453"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.206945 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" podStartSLOduration=122.206939604 podStartE2EDuration="2m2.206939604s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.206472376 +0000 UTC m=+140.348974667" watchObservedRunningTime="2025-11-22 09:05:44.206939604 +0000 UTC m=+140.349441895"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.242916 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" event={"ID":"49503ce5-c224-4967-b968-0480264dca16","Type":"ContainerStarted","Data":"e737872b29325fb830dd672efee4c045454d709014d6e1f4a17a1a700cf1ddc5"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243010 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" event={"ID":"49503ce5-c224-4967-b968-0480264dca16","Type":"ContainerStarted","Data":"7c684832c06b3bcaa38cd89d4dee5f12924f1dfc441f5d9c92864d502d02a7dc"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243024 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-9blmp" event={"ID":"49503ce5-c224-4967-b968-0480264dca16","Type":"ContainerStarted","Data":"b29d33804320cf9d1b6029a6847df4cfaa4efb27ca86dfcfc13388b974e2fc79"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243033 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rv5vl" event={"ID":"2317bf17-7606-4b48-b58d-800167e8131f","Type":"ContainerStarted","Data":"8f74379b528d8c38aa0a19bb3b175467578008a512a9cdde200c7710ae6b4682"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243045 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rv5vl" event={"ID":"2317bf17-7606-4b48-b58d-800167e8131f","Type":"ContainerStarted","Data":"7ee64e1b1d1a5531e80b6806edbf41f7499039ae452ba305de75e6c3527bf02f"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243054 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" event={"ID":"0b941991-050f-44f9-a7e8-eb0b1ae14ade","Type":"ContainerStarted","Data":"443f2ef8105483e4eaf19d080d96d7fad75e1349656290a83cbd49cc44273984"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243089 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243099 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" event={"ID":"0b941991-050f-44f9-a7e8-eb0b1ae14ade","Type":"ContainerStarted","Data":"76bea947406a2fc4f8d4b309de4ce7c93d784083325ac04dc8ed8cbaedd5fd54"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.243107 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" event={"ID":"5683ccd7-f245-4a19-aff7-686b294455f9","Type":"ContainerStarted","Data":"2f1a770d37fc13b7a3b8371f1e74a32d1e7740725f5fd13a47b5d6f3efff2f9c"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.259088 4693 patch_prober.go:28] interesting pod/console-operator-58897d9998-wg5vx container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.259169 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" podUID="0b941991-050f-44f9-a7e8-eb0b1ae14ade" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.9:8443/readyz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.259921 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-jxzb9" event={"ID":"79e553de-810b-486a-a739-8d9a5ce8e966","Type":"ContainerStarted","Data":"30b8d9513e9a7840ec5a2074ee31d3e1306d7a4a03052a3aa56d156f2d26776b"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.288190 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:44 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:44 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:44 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.288450 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.296117 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" event={"ID":"f0177d6a-4233-4759-9f66-facba4d65adf","Type":"ContainerStarted","Data":"c817197455ef9ef0df647f8827051f9042898008f3214d8ab37727af6fb53d9d"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.296161 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" event={"ID":"f0177d6a-4233-4759-9f66-facba4d65adf","Type":"ContainerStarted","Data":"6377ff45be6d94a4b21e1fbdb6c104f12454b3463c252d84b44aeffa89f48321"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.297925 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" event={"ID":"be6d2d6c-b568-4b92-915d-20dd0d7f233c","Type":"ContainerStarted","Data":"eec238100f323ac329b58336d9496ba5ee98a3d3b4337a619c307a68977768e2"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.297971 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" event={"ID":"be6d2d6c-b568-4b92-915d-20dd0d7f233c","Type":"ContainerStarted","Data":"5bc433f8246f9e55113c1d2dbd341a54acb9098b4b57312588213f03f4e6f47e"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.300460 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.301766 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.801756074 +0000 UTC m=+140.944258364 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.326114 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" event={"ID":"aa0bee87-fb16-49ea-9680-1ab3ebc1caed","Type":"ContainerStarted","Data":"66717249cd8c953362aed407a36d62f69707032b48155217d1e30b4d6f3cb6c3"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.326158 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" event={"ID":"aa0bee87-fb16-49ea-9680-1ab3ebc1caed","Type":"ContainerStarted","Data":"3ce333560df0be88cbbdc4f6c0d8b8b594cf34c8913416f3e665c8864ada4750"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.328942 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.329263 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.347672 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.381060 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" event={"ID":"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6","Type":"ContainerStarted","Data":"267c9b19da1ebe475c6db37188c4ba837165bd0682d2ac3533356ec371c6347b"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.381092 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" event={"ID":"bf4475aa-6a09-4f8d-a182-d575ffb0f9f6","Type":"ContainerStarted","Data":"73cafcfaaee9609832d025628e200c3346f916e9b484e4c7ef821f3bd3bbcb9d"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.400913 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.402694 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:44.902669646 +0000 UTC m=+141.045171937 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.444559 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" event={"ID":"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c","Type":"ContainerStarted","Data":"7b049cc8104e9a8079295a7b2646a1ad9c4800722e1ee1821df58ed8d72df0ce"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.466210 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" event={"ID":"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e","Type":"ContainerStarted","Data":"3a453d3f49527e8f6c4160ce23e02c379403b95bfc685b6865e6b91a0604fba8"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.466257 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" event={"ID":"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e","Type":"ContainerStarted","Data":"d0537d3739d0d7f08eed05439695af80d2a09ed7ce3fba8e688ad7243af46685"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.489674 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" event={"ID":"6875be01-c5cd-437a-a4aa-878ba5dbe400","Type":"ContainerStarted","Data":"6209aafe9ba2fa9e2d7977a3370705cca12e9e574e4eb591b6c76c0609f0070f"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.502284 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.502516 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.002505754 +0000 UTC m=+141.145008045 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.503601 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" event={"ID":"10fe4917-ff80-4a00-be3d-24398ddde1ae","Type":"ContainerStarted","Data":"43b955fc0853102946430743deb6eb88e1692860431631b9f30bd6223f55e239"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.556317 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" event={"ID":"24b3e483-5f23-4878-943e-efe9d09663ca","Type":"ContainerStarted","Data":"298c342b65a51d1f7ea2f00e7911a94fe339810791d06ca956affd2e9a657b52"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.556346 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" event={"ID":"24b3e483-5f23-4878-943e-efe9d09663ca","Type":"ContainerStarted","Data":"b16f6c881ec9a381bcdf0d3b0e0fc964b911e51663d3b61491311bd4048b6b78"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.589172 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" event={"ID":"d1c24056-5be6-4718-9e38-e74af7d815db","Type":"ContainerStarted","Data":"c19eb8356e3958d2dfc43182dd641179d1d12980a02a512217c125214ada4246"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.590050 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.590071 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" event={"ID":"d1c24056-5be6-4718-9e38-e74af7d815db","Type":"ContainerStarted","Data":"188dee88c61db337bc74af075273714f5f476048557eaeb742409dfe7a00e74e"}
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.590610 4693 patch_prober.go:28] interesting pod/downloads-7954f5f757-984ks container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body=
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.590649 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-984ks" podUID="4ee0f119-a41c-44c4-8057-a8dd22d6b5c3" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.591927 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" podUID="24790c77-ff73-4528-a742-ab163edd8ac8" containerName="controller-manager" containerID="cri-o://65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35" gracePeriod=30
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.603889 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-w6rvg"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.604815 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.605107 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.605968 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.105954543 +0000 UTC m=+141.248456834 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.606754 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-nqqnd"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.610944 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-2d96p"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.712653 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.714119 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.214107506 +0000 UTC m=+141.356609798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.804702 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-wg5vx" podStartSLOduration=123.804681399 podStartE2EDuration="2m3.804681399s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.803999488 +0000 UTC m=+140.946501778" watchObservedRunningTime="2025-11-22 09:05:44.804681399 +0000 UTC m=+140.947183690"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.815453 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.816010 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.315992195 +0000 UTC m=+141.458494486 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.859261 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" podStartSLOduration=123.85923055 podStartE2EDuration="2m3.85923055s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.828093808 +0000 UTC m=+140.970596100" watchObservedRunningTime="2025-11-22 09:05:44.85923055 +0000 UTC m=+141.001732841"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.860465 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" podStartSLOduration=122.860461013 podStartE2EDuration="2m2.860461013s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.852948468 +0000 UTC m=+140.995450759" watchObservedRunningTime="2025-11-22 09:05:44.860461013 +0000 UTC m=+141.002963304"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.917220 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-fdrlh" podStartSLOduration=122.917194691 podStartE2EDuration="2m2.917194691s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.873917693 +0000 UTC m=+141.016419984" watchObservedRunningTime="2025-11-22 09:05:44.917194691 +0000 UTC m=+141.059696982"
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.917878 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:44 crc kubenswrapper[4693]: E1122 09:05:44.918249 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.418233274 +0000 UTC m=+141.560735566 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:44 crc kubenswrapper[4693]: I1122 09:05:44.965904 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-bf9vt" podStartSLOduration=122.965885167 podStartE2EDuration="2m2.965885167s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:44.965158972 +0000 UTC m=+141.107661262" watchObservedRunningTime="2025-11-22 09:05:44.965885167 +0000 UTC m=+141.108387458"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.018983 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.019362 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.519348185 +0000 UTC m=+141.661850476 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.031208 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-fkplq" podStartSLOduration=123.03118498 podStartE2EDuration="2m3.03118498s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.004162363 +0000 UTC m=+141.146664654" watchObservedRunningTime="2025-11-22 09:05:45.03118498 +0000 UTC m=+141.173687271"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.062974 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-l4bpn" podStartSLOduration=123.062962066 podStartE2EDuration="2m3.062962066s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.059094384 +0000 UTC m=+141.201596695" watchObservedRunningTime="2025-11-22 09:05:45.062962066 +0000 UTC m=+141.205464358"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.079458 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" podStartSLOduration=124.079440528 podStartE2EDuration="2m4.079440528s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.078394221 +0000 UTC m=+141.220896512" watchObservedRunningTime="2025-11-22 09:05:45.079440528 +0000 UTC m=+141.221942819"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.103393 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-k28v7" podStartSLOduration=123.103378695 podStartE2EDuration="2m3.103378695s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.102148913 +0000 UTC m=+141.244651204" watchObservedRunningTime="2025-11-22 09:05:45.103378695 +0000 UTC m=+141.245880986"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.120776 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.121227 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.621211433 +0000 UTC m=+141.763713724 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.143435 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.175496 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"]
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.175700 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="24790c77-ff73-4528-a742-ab163edd8ac8" containerName="controller-manager"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.175718 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="24790c77-ff73-4528-a742-ab163edd8ac8" containerName="controller-manager"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.175819 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="24790c77-ff73-4528-a742-ab163edd8ac8" containerName="controller-manager"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.176266 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.182988 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"]
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.222906 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223000 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles\") pod \"24790c77-ff73-4528-a742-ab163edd8ac8\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223031 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca\") pod \"24790c77-ff73-4528-a742-ab163edd8ac8\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223064 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert\") pod \"24790c77-ff73-4528-a742-ab163edd8ac8\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223091 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5ncsx\" (UniqueName: \"kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx\") pod \"24790c77-ff73-4528-a742-ab163edd8ac8\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223113 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config\") pod \"24790c77-ff73-4528-a742-ab163edd8ac8\" (UID: \"24790c77-ff73-4528-a742-ab163edd8ac8\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223349 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223389 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223428 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.223457 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqtsz\" (UniqueName: \"kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.223590 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.723576315 +0000 UTC m=+141.866078605 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.225011 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "24790c77-ff73-4528-a742-ab163edd8ac8" (UID: "24790c77-ff73-4528-a742-ab163edd8ac8"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.225422 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca" (OuterVolumeSpecName: "client-ca") pod "24790c77-ff73-4528-a742-ab163edd8ac8" (UID: "24790c77-ff73-4528-a742-ab163edd8ac8"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.225534 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config" (OuterVolumeSpecName: "config") pod "24790c77-ff73-4528-a742-ab163edd8ac8" (UID: "24790c77-ff73-4528-a742-ab163edd8ac8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.236365 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx" (OuterVolumeSpecName: "kube-api-access-5ncsx") pod "24790c77-ff73-4528-a742-ab163edd8ac8" (UID: "24790c77-ff73-4528-a742-ab163edd8ac8"). InnerVolumeSpecName "kube-api-access-5ncsx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.241975 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "24790c77-ff73-4528-a742-ab163edd8ac8" (UID: "24790c77-ff73-4528-a742-ab163edd8ac8"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.278650 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:45 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:45 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:45 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.278701 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325120 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325172 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325195 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325216 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325254 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325285 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqtsz\" (UniqueName: \"kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325396 4693 reconciler_common.go:293] "Volume detached for volume
\"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325417 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325427 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24790c77-ff73-4528-a742-ab163edd8ac8-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325436 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5ncsx\" (UniqueName: \"kubernetes.io/projected/24790c77-ff73-4528-a742-ab163edd8ac8-kube-api-access-5ncsx\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.325448 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24790c77-ff73-4528-a742-ab163edd8ac8-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.325987 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.825973507 +0000 UTC m=+141.968475798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.327677 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.327699 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.328251 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.334618 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"] Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.335133 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.336726 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.347979 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.351524 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqtsz\" (UniqueName: \"kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz\") pod \"controller-manager-879f6c89f-4ck6n\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.353982 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"] Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.426499 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.426816 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.426879 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r7cw\" (UniqueName: \"kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.426899 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.427049 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:45.927033595 +0000 UTC m=+142.069535886 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.511990 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.528608 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.528656 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.528678 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r7cw\" (UniqueName: \"kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.528696 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.529075 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.528656 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-hwwtg"] Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.529307 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.529736 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-22 09:05:46.029725983 +0000 UTC m=+142.172228274 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.530014 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.532128 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.549585 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r7cw\" (UniqueName: \"kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw\") pod \"certified-operators-f6rdl\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.567190 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hwwtg"] Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.629399 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.629591 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.129554325 +0000 UTC m=+142.272056616 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.629800 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.629834 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.629931 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2zm5\" (UniqueName: \"kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.629990 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.630274 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.130262136 +0000 UTC m=+142.272764427 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.648416 4693 generic.go:334] "Generic (PLEG): container finished" podID="24790c77-ff73-4528-a742-ab163edd8ac8" containerID="65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35" exitCode=0 Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.648508 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" event={"ID":"24790c77-ff73-4528-a742-ab163edd8ac8","Type":"ContainerDied","Data":"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.648549 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" event={"ID":"24790c77-ff73-4528-a742-ab163edd8ac8","Type":"ContainerDied","Data":"235c5f62adbd87296ff1348b625c28be64a50f40567c7b2c18ada111282b52c1"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.648571 4693 scope.go:117] "RemoveContainer" containerID="65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.648731 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-dgnsw" Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.658157 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" event={"ID":"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c","Type":"ContainerStarted","Data":"3072b80daa6c5175cfbd05fe3862e8f434f8157546b78bd05da820171f720449"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.658209 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" event={"ID":"f31ce9a0-7f51-4fc9-a5c3-5afc942a4f9c","Type":"ContainerStarted","Data":"2bd7216bb06c7dccf3b44e0011413481596f3773feec571613d17b3acd416494"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.664051 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" event={"ID":"5683ccd7-f245-4a19-aff7-686b294455f9","Type":"ContainerStarted","Data":"0cbaf2164fb8cff199bc6659cbdd009e90383c2630bfa310281344017f00a610"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.664088 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" event={"ID":"5683ccd7-f245-4a19-aff7-686b294455f9","Type":"ContainerStarted","Data":"5f8e7cc5c56b724ff2d2b36b63c4aa912ff20b1d71d49740c596dde0fc66d5d4"} Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.667248 4693 scope.go:117] "RemoveContainer" containerID="65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35" Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.667775 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35\": 
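The nestedpendingoperations errors above all carry "(durationBeforeRetry 500ms)": a failed volume operation is requeued with a deadline before which no retry is permitted, and for repeated failures of the same operation the kubelet grows that delay exponentially. An illustrative Go sketch of such a policy; the 500ms base matches the log, but the doubling factor and cap here are assumptions for illustration, not the kubelet's exact constants:

    package main

    import (
        "fmt"
        "time"
    )

    // nextDelay returns the wait before the next retry, doubling on each
    // consecutive failure and saturating at a cap (assumed values).
    func nextDelay(prev time.Duration) time.Duration {
        const (
            initial = 500 * time.Millisecond // base delay visible in the log
            cap     = 2 * time.Minute        // assumed upper bound
        )
        if prev == 0 {
            return initial
        }
        if d := prev * 2; d < cap {
            return d
        }
        return cap
    }

    func main() {
        var d time.Duration
        for i := 1; i <= 5; i++ {
            d = nextDelay(d)
            fmt.Printf("retry %d: no retries permitted for %v\n", i, d)
        }
    }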
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.667775 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35\": container with ID starting with 65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35 not found: ID does not exist" containerID="65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.667813 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35"} err="failed to get container status \"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35\": rpc error: code = NotFound desc = could not find container \"65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35\": container with ID starting with 65fefe0a32f421b81a95d17c4cb05b5a87ad2040d58937932e94cfba3c6d7d35 not found: ID does not exist"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.668417 4693 generic.go:334] "Generic (PLEG): container finished" podID="3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" containerID="3a453d3f49527e8f6c4160ce23e02c379403b95bfc685b6865e6b91a0604fba8" exitCode=0
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.668571 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" event={"ID":"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e","Type":"ContainerDied","Data":"3a453d3f49527e8f6c4160ce23e02c379403b95bfc685b6865e6b91a0604fba8"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.676253 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f6rdl"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.678765 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-rz6dr" event={"ID":"6875be01-c5cd-437a-a4aa-878ba5dbe400","Type":"ContainerStarted","Data":"c1eef8255cb263f3647bda3836717f67ba8d1550905069341c5a7b7a99820735"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.680731 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" event={"ID":"10fe4917-ff80-4a00-be3d-24398ddde1ae","Type":"ContainerStarted","Data":"a18f0faca3ede6ee199f09eafc2801f8e46a401747371a63e9a97a91bef4b69e"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.680789 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" event={"ID":"10fe4917-ff80-4a00-be3d-24398ddde1ae","Type":"ContainerStarted","Data":"1e5ed762438cb5f7aebfc00f996587e76568531e3a92905955395b17053d71ad"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.687898 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" event={"ID":"6a3a6a12-ab76-4742-91b4-af9ef4b70181","Type":"ContainerStarted","Data":"6267421b49f2a315cabaf4749801eb1eece2198368393d3bb811cf86d446c699"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.703138 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-rv5vl" event={"ID":"2317bf17-7606-4b48-b58d-800167e8131f","Type":"ContainerStarted","Data":"9a03d0b6808b57973f97c21ff89e1c61a223e9d96ec043a1cc889a385acfe817"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.703867 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-rv5vl"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.715738 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-dgfk9" podStartSLOduration=123.715727944 podStartE2EDuration="2m3.715727944s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.688101673 +0000 UTC m=+141.830603964" watchObservedRunningTime="2025-11-22 09:05:45.715727944 +0000 UTC m=+141.858230235"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.728047 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" event={"ID":"14e6c49b-951c-4c4e-bacb-7d360d8bdd59","Type":"ContainerStarted","Data":"4b3dcf9554bb34dbac6b2f3cb7ba3bd869173a1d6f0ea6c2623b9c06a9f28c7b"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.735006 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.735202 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.735252 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.735371 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2zm5\" (UniqueName: \"kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.735715 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-rssjc"]
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.736466 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.236450767 +0000 UTC m=+142.378953058 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.736651 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.736774 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.739803 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.753931 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"]
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.756114 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rssjc"]
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.757479 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-dgnsw"]
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.762072 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2zm5\" (UniqueName: \"kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5\") pod \"community-operators-hwwtg\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.762550 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-sthb9" podStartSLOduration=123.76253595 podStartE2EDuration="2m3.76253595s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.762388433 +0000 UTC m=+141.904890724" watchObservedRunningTime="2025-11-22 09:05:45.76253595 +0000 UTC m=+141.905038242"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.781907 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-9fflj" event={"ID":"aa0bee87-fb16-49ea-9680-1ab3ebc1caed","Type":"ContainerStarted","Data":"b2eae7eea675eb085f862c8d7158fa3302f268eea5b03aa6ebc33b2a1c2870eb"}
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.782579 4693 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-7skdb container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused" start-of-body=
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.782619 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.32:8080/healthz\": dial tcp 10.217.0.32:8080: connect: connection refused"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.805335 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" podStartSLOduration=123.805316885 podStartE2EDuration="2m3.805316885s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.800871106 +0000 UTC m=+141.943373398" watchObservedRunningTime="2025-11-22 09:05:45.805316885 +0000 UTC m=+141.947819176"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.808561 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.832927 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-wg5vx"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.848603 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.849259 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.849309 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.849427 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgvpb\" (UniqueName: \"kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.871610 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-rv5vl" podStartSLOduration=8.871594817 podStartE2EDuration="8.871594817s" podCreationTimestamp="2025-11-22 09:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.843583172 +0000 UTC m=+141.986085463" watchObservedRunningTime="2025-11-22 09:05:45.871594817 +0000 UTC m=+142.014097109"
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.894367 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.393939039 +0000 UTC m=+142.536441330 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.896903 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.897407 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.899903 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.900558 4693 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.900789 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-j7fbj" podStartSLOduration=123.900780381 podStartE2EDuration="2m3.900780381s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:45.900717002 +0000 UTC m=+142.043219293" watchObservedRunningTime="2025-11-22 09:05:45.900780381 +0000 UTC m=+142.043282672"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.942532 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-l4snx"]
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.958137 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.958375 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.958477 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.958553 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.958586 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgvpb\" (UniqueName: \"kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: E1122 09:05:45.958664 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.458651027 +0000 UTC m=+142.601153318 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.959423 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.960968 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:45 crc kubenswrapper[4693]: I1122 09:05:45.965998 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4snx"]
Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.004132 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgvpb\" (UniqueName: \"kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb\") pod \"certified-operators-rssjc\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") " pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.015896 4693 patch_prober.go:28] interesting pod/apiserver-76f77b778f-m97gh container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]log ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]etcd ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/start-apiserver-admission-initializer ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/generic-apiserver-start-informers ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/max-in-flight-filter ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/storage-object-count-tracker-hook ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Nov 22 09:05:46 crc kubenswrapper[4693]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/project.openshift.io-projectcache ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/openshift.io-startinformers ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/openshift.io-restmapperupdater ok
Nov 22 09:05:46 crc kubenswrapper[4693]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Nov 22 09:05:46 crc kubenswrapper[4693]: livez check failed
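The startup-probe failures recorded for the router and the openshift-apiserver pods are plain HTTP checks: the kubelet GETs the health endpoint, treats any status outside the 200-399 range as failure, and captures the response body (the [+]/[-] check lines above) as the start-of-body output. A minimal Go sketch of that behavior; the URL is a placeholder, not taken from the log:

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "time"
    )

    // probe performs one HTTP health check in the style of a kubelet probe:
    // any status outside 200-399 is a failure, and the body is surfaced
    // for diagnosis (compare the [+]/[-] check lines in the log).
    func probe(url string) error {
        client := &http.Client{Timeout: 5 * time.Second}
        resp, err := client.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(io.LimitReader(resp.Body, 4096))
        if resp.StatusCode < 200 || resp.StatusCode >= 400 {
            return fmt.Errorf("HTTP probe failed with statuscode: %d\n%s", resp.StatusCode, body)
        }
        return nil
    }

    func main() {
        // Placeholder endpoint for illustration only.
        if err := probe("http://127.0.0.1:8080/healthz"); err != nil {
            fmt.Println(err)
        }
    }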
failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-m97gh" podUID="6a3a6a12-ab76-4742-91b4-af9ef4b70181" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.061513 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.061560 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.061611 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-srrw7\" (UniqueName: \"kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.061631 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: E1122 09:05:46.061913 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.561902675 +0000 UTC m=+142.704404966 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.063934 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"] Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.072772 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-rssjc" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.104029 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"] Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.162803 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.162962 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-srrw7\" (UniqueName: \"kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.163030 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.163064 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: E1122 09:05:46.163446 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.663420725 +0000 UTC m=+142.805923016 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.163905 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.164056 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.164731 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="24790c77-ff73-4528-a742-ab163edd8ac8" path="/var/lib/kubelet/pods/24790c77-ff73-4528-a742-ab163edd8ac8/volumes" Nov 22 09:05:46 crc kubenswrapper[4693]: W1122 09:05:46.172858 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5f7f9670_71e1_4be0_955b_7fdf8c953c20.slice/crio-ac4ecd2e0ffe9d8b444cfa3cb70d41eac82d1f5326fe9533584d860c0ec73b08 WatchSource:0}: Error finding container ac4ecd2e0ffe9d8b444cfa3cb70d41eac82d1f5326fe9533584d860c0ec73b08: Status 404 returned error can't find the container with id ac4ecd2e0ffe9d8b444cfa3cb70d41eac82d1f5326fe9533584d860c0ec73b08 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.179734 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-srrw7\" (UniqueName: \"kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7\") pod \"community-operators-l4snx\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") " pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.197299 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-hwwtg"] Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.264423 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: E1122 09:05:46.264890 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-22 09:05:46.764877588 +0000 UTC m=+142.907379880 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-s8j86" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.280369 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 09:05:46 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld Nov 22 09:05:46 crc kubenswrapper[4693]: [+]process-running ok Nov 22 09:05:46 crc kubenswrapper[4693]: healthz check failed Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.280414 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.283146 4693 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-22T09:05:45.900574414Z","Handler":null,"Name":""} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.292760 4693 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.292794 4693 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.304606 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4snx" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.332477 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-rssjc"] Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.365057 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.378344 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
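The plugin_watcher, RegisterPlugin, and csi_plugin lines above are the registration handshake that finally unblocks the stuck volume operations: the kubelet notices the registration socket, then validates the driver over the CSI Identity service at the advertised endpoint. A sketch of that identity call using the CSI Go bindings, assuming node-local access to the socket path named in the log:

    package main

    import (
        "context"
        "fmt"
        "time"

        csi "github.com/container-storage-interface/spec/lib/go/csi"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func main() {
        // Endpoint as logged by csi_plugin.go; a unix-domain socket on the node.
        conn, err := grpc.Dial("unix:///var/lib/kubelet/plugins/csi-hostpath/csi.sock",
            grpc.WithTransportCredentials(insecure.NewCredentials()))
        if err != nil {
            panic(err)
        }
        defer conn.Close()

        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()

        // GetPluginInfo returns the driver name the kubelet registers,
        // expected here to be kubevirt.io.hostpath-provisioner.
        info, err := csi.NewIdentityClient(conn).GetPluginInfo(ctx, &csi.GetPluginInfoRequest{})
        if err != nil {
            panic(err)
        }
        fmt.Println(info.GetName(), info.GetVendorVersion())
    }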
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.466829 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.474325 4693 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.474353 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.517506 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-s8j86\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.545813 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-l4snx"] Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.680356 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.790957 4693 generic.go:334] "Generic (PLEG): container finished" podID="79fc98da-aee2-436c-823b-5e608446dc29" containerID="28fa0d7127de4219ebeb9e74c61e6a67b8417e2d3f588bcad115261a1ef70adf" exitCode=0 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.791035 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerDied","Data":"28fa0d7127de4219ebeb9e74c61e6a67b8417e2d3f588bcad115261a1ef70adf"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.791069 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerStarted","Data":"f5108ac55a6b26e0535e33bb0417ab364ab8da2737e6d25f14bbfa075e421d68"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.792890 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerID="0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71" exitCode=0 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.792940 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerDied","Data":"0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.792956 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerStarted","Data":"ac9f39a3f0c958b1a4af759823bf1137bd9bd4dfe911c887034858e6fa95e6c9"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.794163 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.797135 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerDied","Data":"a6c6300b699597cb27041c139639f5c99e0f1ab84c27576f35f376f859afb0cc"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.797951 4693 generic.go:334] "Generic (PLEG): container finished" podID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerID="a6c6300b699597cb27041c139639f5c99e0f1ab84c27576f35f376f859afb0cc" exitCode=0 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.798167 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerStarted","Data":"b9a9d459cbfb8f3ca894d2d90a31cf3c57d554537ae2243c826f1ca439faec4e"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.801020 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" event={"ID":"5f7f9670-71e1-4be0-955b-7fdf8c953c20","Type":"ContainerStarted","Data":"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.801051 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" 
event={"ID":"5f7f9670-71e1-4be0-955b-7fdf8c953c20","Type":"ContainerStarted","Data":"ac4ecd2e0ffe9d8b444cfa3cb70d41eac82d1f5326fe9533584d860c0ec73b08"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.801724 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.803657 4693 generic.go:334] "Generic (PLEG): container finished" podID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerID="993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7" exitCode=0 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.803926 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerDied","Data":"993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.803951 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerStarted","Data":"b198cd54b4ea20661c797ae7b09aa2b6d878d3a54f6e783a4d1e3f16c3628a9f"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.807475 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.814739 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" event={"ID":"10fe4917-ff80-4a00-be3d-24398ddde1ae","Type":"ContainerStarted","Data":"9d9fa074283845729545494159e3d392ebb54fc92335cb9f51b499802631db6f"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.814794 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" event={"ID":"10fe4917-ff80-4a00-be3d-24398ddde1ae","Type":"ContainerStarted","Data":"bf0f5fa1322d6cf328db0a81819f8f672b2be33064b5f6cbf20f71de0f725490"} Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.827276 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" podStartSLOduration=3.827260003 podStartE2EDuration="3.827260003s" podCreationTimestamp="2025-11-22 09:05:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:46.822831967 +0000 UTC m=+142.965334258" watchObservedRunningTime="2025-11-22 09:05:46.827260003 +0000 UTC m=+142.969762284" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.847597 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"] Nov 22 09:05:46 crc kubenswrapper[4693]: W1122 09:05:46.852374 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcf82f0b1_de6b_449a_be90_f76c217b315b.slice/crio-7d105b8933b19a155be04998a638dfb91612ee46b767b4dc9d7f95da1008a785 WatchSource:0}: Error finding container 7d105b8933b19a155be04998a638dfb91612ee46b767b4dc9d7f95da1008a785: Status 404 returned error can't find the container with id 7d105b8933b19a155be04998a638dfb91612ee46b767b4dc9d7f95da1008a785 Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.895873 4693 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="hostpath-provisioner/csi-hostpathplugin-wwrvj" podStartSLOduration=9.895798756 podStartE2EDuration="9.895798756s" podCreationTimestamp="2025-11-22 09:05:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:46.887400837 +0000 UTC m=+143.029903127" watchObservedRunningTime="2025-11-22 09:05:46.895798756 +0000 UTC m=+143.038301048" Nov 22 09:05:46 crc kubenswrapper[4693]: I1122 09:05:46.985197 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.083351 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume\") pod \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.083690 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume\") pod \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.083782 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume" (OuterVolumeSpecName: "config-volume") pod "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" (UID: "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.083794 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26lxl\" (UniqueName: \"kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl\") pod \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\" (UID: \"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e\") " Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.084063 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.088787 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" (UID: "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.088912 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl" (OuterVolumeSpecName: "kube-api-access-26lxl") pod "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" (UID: "3ec0a6f6-e561-4b7d-89db-7bb147f8f21e"). InnerVolumeSpecName "kube-api-access-26lxl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.185193 4693 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.185219 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26lxl\" (UniqueName: \"kubernetes.io/projected/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e-kube-api-access-26lxl\") on node \"crc\" DevicePath \"\"" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.279502 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 09:05:47 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld Nov 22 09:05:47 crc kubenswrapper[4693]: [+]process-running ok Nov 22 09:05:47 crc kubenswrapper[4693]: healthz check failed Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.279588 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.531204 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:05:47 crc kubenswrapper[4693]: E1122 09:05:47.531396 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" containerName="collect-profiles" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.531412 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" containerName="collect-profiles" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.531500 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" containerName="collect-profiles" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.532145 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.533868 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.542863 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.589877 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.590001 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4b795\" (UniqueName: \"kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.590060 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.691707 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.691808 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4b795\" (UniqueName: \"kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.691860 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.693051 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.694581 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities\") pod \"redhat-marketplace-4rbw9\" (UID: 
\"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.718373 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4b795\" (UniqueName: \"kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795\") pod \"redhat-marketplace-4rbw9\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.825453 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.825450 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf" event={"ID":"3ec0a6f6-e561-4b7d-89db-7bb147f8f21e","Type":"ContainerDied","Data":"d0537d3739d0d7f08eed05439695af80d2a09ed7ce3fba8e688ad7243af46685"} Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.825514 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d0537d3739d0d7f08eed05439695af80d2a09ed7ce3fba8e688ad7243af46685" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.830013 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" event={"ID":"cf82f0b1-de6b-449a-be90-f76c217b315b","Type":"ContainerStarted","Data":"4491ba63e95dbfc53749d56d31d08e28cc3ad6a3da767a752b305ae2a166a411"} Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.830080 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" event={"ID":"cf82f0b1-de6b-449a-be90-f76c217b315b","Type":"ContainerStarted","Data":"7d105b8933b19a155be04998a638dfb91612ee46b767b4dc9d7f95da1008a785"} Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.844772 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.857375 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" podStartSLOduration=125.857358624 podStartE2EDuration="2m5.857358624s" podCreationTimestamp="2025-11-22 09:03:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:47.85655868 +0000 UTC m=+143.999060971" watchObservedRunningTime="2025-11-22 09:05:47.857358624 +0000 UTC m=+143.999860915" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.931535 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.932798 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.934809 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.996700 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.996776 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mq5rn\" (UniqueName: \"kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:47 crc kubenswrapper[4693]: I1122 09:05:47.996894 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.097941 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.098234 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mq5rn\" (UniqueName: \"kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.098259 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.098818 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.100096 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.127580 4693 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-mq5rn\" (UniqueName: \"kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn\") pod \"redhat-marketplace-2d6dr\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.161522 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.162412 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.253370 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.292575 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 09:05:48 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld Nov 22 09:05:48 crc kubenswrapper[4693]: [+]process-running ok Nov 22 09:05:48 crc kubenswrapper[4693]: healthz check failed Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.292634 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.445449 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.525096 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.525976 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.527908 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.535329 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.604478 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.604584 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ml9nq\" (UniqueName: \"kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.604617 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.707969 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.708327 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.708420 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ml9nq\" (UniqueName: \"kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.708513 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.708760 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " 
pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.738719 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ml9nq\" (UniqueName: \"kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq\") pod \"redhat-operators-4mxcr\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.841686 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.842426 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.847704 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.848166 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.849483 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.851426 4693 generic.go:334] "Generic (PLEG): container finished" podID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerID="09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e" exitCode=0 Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.852355 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerDied","Data":"09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e"} Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.852424 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerStarted","Data":"85dccfdea3a7cd44757f074b4ae0ed93d3f6f29554bdbed17e0b07c3ba49a82e"} Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.858734 4693 generic.go:334] "Generic (PLEG): container finished" podID="7684c092-86da-49ca-97d4-dfcebb032adf" containerID="d4bc4a9f4de1093eaa785bd8be091f11b4fcdd7e59df768c02583d95875690e3" exitCode=0 Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.859710 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerDied","Data":"d4bc4a9f4de1093eaa785bd8be091f11b4fcdd7e59df768c02583d95875690e3"} Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.859746 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerStarted","Data":"ce3156c57d478c635f3e322e8ad2a3874fee6fb49fd6c264e865f33592843204"} Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.860191 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.907317 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.911064 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.911162 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.933622 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.935986 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:48 crc kubenswrapper[4693]: I1122 09:05:48.951559 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.012825 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.012914 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.013023 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.013057 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-997gk\" (UniqueName: \"kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.013084 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.013116 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.033701 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.113744 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-997gk\" (UniqueName: \"kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.114169 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.114243 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.115138 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.115542 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.128289 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-997gk\" (UniqueName: \"kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk\") pod \"redhat-operators-6nvwr\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.165451 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.166684 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:05:49 crc kubenswrapper[4693]: W1122 09:05:49.194664 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podff7eb3fe_9db6_48e9_87c8_707818512fb5.slice/crio-1d51c04a1acd8af9f5f37c9229876d32567c9506009408775fdaeffdbdf22e6a WatchSource:0}: Error finding container 1d51c04a1acd8af9f5f37c9229876d32567c9506009408775fdaeffdbdf22e6a: Status 404 returned error can't find the container with id 1d51c04a1acd8af9f5f37c9229876d32567c9506009408775fdaeffdbdf22e6a Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.255379 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.279719 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 22 09:05:49 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld Nov 22 09:05:49 crc kubenswrapper[4693]: [+]process-running ok Nov 22 09:05:49 crc kubenswrapper[4693]: healthz check failed Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.279772 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.475336 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 22 09:05:49 crc kubenswrapper[4693]: W1122 09:05:49.494182 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pode05669cb_7617_425d_bda6_bde1bb48fee1.slice/crio-f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d WatchSource:0}: Error finding container f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d: Status 404 returned error can't find the container with id f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.726306 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:05:49 crc kubenswrapper[4693]: W1122 09:05:49.740521 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8 WatchSource:0}: Error finding container c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8: Status 404 returned error can't find the container with id c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8 Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.886379 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerStarted","Data":"c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8"} Nov 22 09:05:49 crc 
kubenswrapper[4693]: I1122 09:05:49.891757 4693 generic.go:334] "Generic (PLEG): container finished" podID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerID="fbeddd9ede75cbc6c28291dde5870607eb4134b4cc762678a1aa96f184b9060c" exitCode=0 Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.891891 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerDied","Data":"fbeddd9ede75cbc6c28291dde5870607eb4134b4cc762678a1aa96f184b9060c"} Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.891941 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerStarted","Data":"1d51c04a1acd8af9f5f37c9229876d32567c9506009408775fdaeffdbdf22e6a"} Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.905467 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e05669cb-7617-425d-bda6-bde1bb48fee1","Type":"ContainerStarted","Data":"7c9888efbe6c7d3fceb56d7ef84a617f3bd1aab4f6364b1079af8470a40b7e90"} Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.906308 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e05669cb-7617-425d-bda6-bde1bb48fee1","Type":"ContainerStarted","Data":"f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d"} Nov 22 09:05:49 crc kubenswrapper[4693]: I1122 09:05:49.919048 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=1.9190279540000001 podStartE2EDuration="1.919027954s" podCreationTimestamp="2025-11-22 09:05:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:05:49.917993649 +0000 UTC m=+146.060495970" watchObservedRunningTime="2025-11-22 09:05:49.919027954 +0000 UTC m=+146.061530245" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.037035 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.044019 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.138856 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.138980 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.139030 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.139770 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.141973 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.142408 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.257342 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.265117 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.269279 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.280473 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:50 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:50 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:50 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.280539 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.882875 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-984ks"
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.904923 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.916835 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-m97gh"
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.925129 4693 generic.go:334] "Generic (PLEG): container finished" podID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerID="27368cbe46215577cf379ab04c68b57efa59c4767ee63334dcbc6cdb7c849281" exitCode=0
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.925454 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerDied","Data":"27368cbe46215577cf379ab04c68b57efa59c4767ee63334dcbc6cdb7c849281"}
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.935753 4693 generic.go:334] "Generic (PLEG): container finished" podID="e05669cb-7617-425d-bda6-bde1bb48fee1" containerID="7c9888efbe6c7d3fceb56d7ef84a617f3bd1aab4f6364b1079af8470a40b7e90" exitCode=0
Nov 22 09:05:50 crc kubenswrapper[4693]: I1122 09:05:50.935812 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e05669cb-7617-425d-bda6-bde1bb48fee1","Type":"ContainerDied","Data":"7c9888efbe6c7d3fceb56d7ef84a617f3bd1aab4f6364b1079af8470a40b7e90"}
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.258167 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.276605 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.288354 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:51 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:51 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:51 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.288463 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.360993 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.361326 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.365370 4693 patch_prober.go:28] interesting pod/console-f9d7485db-m5ldq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.365423 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m5ldq" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" probeResult="failure" output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.440318 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.441037 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.448362 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.448887 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.456654 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.464814 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.464914 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.566316 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.566385 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.566539 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.594689 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:51 crc kubenswrapper[4693]: I1122 09:05:51.793213 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:05:52 crc kubenswrapper[4693]: I1122 09:05:52.278454 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:52 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:52 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:52 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:52 crc kubenswrapper[4693]: I1122 09:05:52.278514 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:53 crc kubenswrapper[4693]: I1122 09:05:53.277749 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:53 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:53 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:53 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:53 crc kubenswrapper[4693]: I1122 09:05:53.278054 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:53 crc kubenswrapper[4693]: I1122 09:05:53.362199 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-rv5vl"
Nov 22 09:05:54 crc kubenswrapper[4693]: I1122 09:05:54.277350 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:54 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:54 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:54 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:54 crc kubenswrapper[4693]: I1122 09:05:54.277408 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:54 crc kubenswrapper[4693]: I1122 09:05:54.976629 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"f6a61d48383cfe6175947489470faf25754c020ea0c4ed532e1194983672e77a"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.279954 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:55 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:55 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:55 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.280028 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:55 crc kubenswrapper[4693]: W1122 09:05:55.387728 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-f62fa413b85c1bcd669edb16830e483203529d4038159972858da757643af24a WatchSource:0}: Error finding container f62fa413b85c1bcd669edb16830e483203529d4038159972858da757643af24a: Status 404 returned error can't find the container with id f62fa413b85c1bcd669edb16830e483203529d4038159972858da757643af24a
Nov 22 09:05:55 crc kubenswrapper[4693]: W1122 09:05:55.391053 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-86b124f0806a890dbf99695e0ab4438a5bdc04abcb562bccb5f9df4024d78da4 WatchSource:0}: Error finding container 86b124f0806a890dbf99695e0ab4438a5bdc04abcb562bccb5f9df4024d78da4: Status 404 returned error can't find the container with id 86b124f0806a890dbf99695e0ab4438a5bdc04abcb562bccb5f9df4024d78da4
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.439683 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
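The router's startup probe above fails once per second with HTTP 500, and the healthz body enumerates sub-checks ([-]backend-http, [-]has-synced, [+]process-running). A minimal Go sketch of this style of check, assuming a hypothetical healthz URL and port; this is illustrative only, not the kubelet's actual prober.go implementation:

package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// probeOnce issues one HTTP GET against a healthz-style endpoint and
// mirrors the pass/fail rule the log shows: any status code outside
// [200, 400) counts as a probe failure, and the start of the response
// body is kept for the failure message.
func probeOnce(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return fmt.Errorf("probe error: %w", err) // e.g. connect: connection refused
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(io.LimitReader(resp.Body, 10*1024))
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		// The router's body lists sub-checks such as
		// "[-]backend-http failed: reason withheld".
		return fmt.Errorf("HTTP probe failed with statuscode: %d, start-of-body=%s",
			resp.StatusCode, body)
	}
	return nil
}

func main() {
	// Hypothetical endpoint standing in for the router's healthz port.
	if err := probeOnce("http://127.0.0.1:1936/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}

The repeated failures here are expected while the router is still syncing its backends; the probe flips to "started" at 09:06:00 below once [-]backend-http and [-]has-synced clear.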
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.625830 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access\") pod \"e05669cb-7617-425d-bda6-bde1bb48fee1\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") "
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.626284 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir\") pod \"e05669cb-7617-425d-bda6-bde1bb48fee1\" (UID: \"e05669cb-7617-425d-bda6-bde1bb48fee1\") "
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.626444 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "e05669cb-7617-425d-bda6-bde1bb48fee1" (UID: "e05669cb-7617-425d-bda6-bde1bb48fee1"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.626657 4693 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/e05669cb-7617-425d-bda6-bde1bb48fee1-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.631064 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e05669cb-7617-425d-bda6-bde1bb48fee1" (UID: "e05669cb-7617-425d-bda6-bde1bb48fee1"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.728294 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e05669cb-7617-425d-bda6-bde1bb48fee1-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.821464 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"]
Nov 22 09:05:55 crc kubenswrapper[4693]: W1122 09:05:55.828258 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod51b3cdb1_79b5_44d5_98ef_b6cc072a8dde.slice/crio-c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a WatchSource:0}: Error finding container c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a: Status 404 returned error can't find the container with id c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.988875 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde","Type":"ContainerStarted","Data":"c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.991065 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc"
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.991076 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"e05669cb-7617-425d-bda6-bde1bb48fee1","Type":"ContainerDied","Data":"f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.991109 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f8c37a3597b558df855b9e88327f83824b196d05d90b09d0eb538c7ed0f3db3d"
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.993305 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"fd961844ac8d7683f2ce617567cc07d36e0cacb4560cc0ca06e0f9c8460352eb"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.993377 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"86b124f0806a890dbf99695e0ab4438a5bdc04abcb562bccb5f9df4024d78da4"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.996195 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"0298f7a1db19c8e4c8df4a0d4505e1b138da5610038240311d16dbefbef3607a"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.998200 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"9fffffb2e65dacaee145a4d55a7768397cb95f844f71e04f23aa95c83a82bc07"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.998240 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"f62fa413b85c1bcd669edb16830e483203529d4038159972858da757643af24a"}
Nov 22 09:05:55 crc kubenswrapper[4693]: I1122 09:05:55.998433 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c"
Nov 22 09:05:56 crc kubenswrapper[4693]: I1122 09:05:56.279046 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:56 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:56 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:56 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:56 crc kubenswrapper[4693]: I1122 09:05:56.279327 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:57 crc kubenswrapper[4693]: I1122 09:05:57.277613 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:57 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:57 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:57 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:57 crc kubenswrapper[4693]: I1122 09:05:57.277668 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:58 crc kubenswrapper[4693]: I1122 09:05:58.278414 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:58 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:58 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:58 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:58 crc kubenswrapper[4693]: I1122 09:05:58.278469 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:05:59 crc kubenswrapper[4693]: I1122 09:05:59.277990 4693 patch_prober.go:28] interesting pod/router-default-5444994796-2z7n2 container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Nov 22 09:05:59 crc kubenswrapper[4693]: [-]has-synced failed: reason withheld
Nov 22 09:05:59 crc kubenswrapper[4693]: [+]process-running ok
Nov 22 09:05:59 crc kubenswrapper[4693]: healthz check failed
Nov 22 09:05:59 crc kubenswrapper[4693]: I1122 09:05:59.278274 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-2z7n2" podUID="21df8944-4816-4a75-83e5-0656e313029c" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.022114 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde","Type":"ContainerStarted","Data":"0baf4bbbb8c60e1cf9e7727af395e7807472b84c36f53b38091f8e4f0a225817"}
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.039332 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=9.039316846 podStartE2EDuration="9.039316846s" podCreationTimestamp="2025-11-22 09:05:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:00.032027491 +0000 UTC m=+156.174529782" watchObservedRunningTime="2025-11-22 09:06:00.039316846 +0000 UTC m=+156.181819137"
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.246814 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.246901 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.266951 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-852ps"
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.279651 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:06:00 crc kubenswrapper[4693]: I1122 09:06:00.284311 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-2z7n2"
Nov 22 09:06:01 crc kubenswrapper[4693]: I1122 09:06:01.034416 4693 generic.go:334] "Generic (PLEG): container finished" podID="51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" containerID="0baf4bbbb8c60e1cf9e7727af395e7807472b84c36f53b38091f8e4f0a225817" exitCode=0
Nov 22 09:06:01 crc kubenswrapper[4693]: I1122 09:06:01.034533 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde","Type":"ContainerDied","Data":"0baf4bbbb8c60e1cf9e7727af395e7807472b84c36f53b38091f8e4f0a225817"}
Nov 22 09:06:01 crc kubenswrapper[4693]: I1122 09:06:01.359604 4693 patch_prober.go:28] interesting pod/console-f9d7485db-m5ldq container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Nov 22 09:06:01 crc kubenswrapper[4693]: I1122 09:06:01.359703 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-m5ldq" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" probeResult="failure" output="Get \"https://10.217.0.7:8443/health\": dial tcp 10.217.0.7:8443: connect: connection refused"
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.540969 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.626164 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir\") pod \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") "
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.626382 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access\") pod \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\" (UID: \"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde\") "
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.627941 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" (UID: "51b3cdb1-79b5-44d5-98ef-b6cc072a8dde"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.644614 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" (UID: "51b3cdb1-79b5-44d5-98ef-b6cc072a8dde"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.728024 4693 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kubelet-dir\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:02 crc kubenswrapper[4693]: I1122 09:06:02.728052 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/51b3cdb1-79b5-44d5-98ef-b6cc072a8dde-kube-api-access\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.033399 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.042373 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/fbc29b7c-60ea-4d65-ae31-fee4b8e7f833-metrics-certs\") pod \"network-metrics-daemon-t4blm\" (UID: \"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833\") " pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.049775 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"51b3cdb1-79b5-44d5-98ef-b6cc072a8dde","Type":"ContainerDied","Data":"c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a"}
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.049820 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.049825 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c900413008a38fdbaf243e86c303ca760307b6d29aa50afc7c6f023934fdb17a"
Nov 22 09:06:03 crc kubenswrapper[4693]: I1122 09:06:03.056757 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-t4blm"
Nov 22 09:06:06 crc kubenswrapper[4693]: I1122 09:06:06.686296 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86"
Nov 22 09:06:06 crc kubenswrapper[4693]: I1122 09:06:06.879997 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-t4blm"]
Nov 22 09:06:06 crc kubenswrapper[4693]: W1122 09:06:06.905652 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfbc29b7c_60ea_4d65_ae31_fee4b8e7f833.slice/crio-5f54a4e61007c243786f741ca1ee08b84fe6bfab6347a266e5c29c1c7a2b35d4 WatchSource:0}: Error finding container 5f54a4e61007c243786f741ca1ee08b84fe6bfab6347a266e5c29c1c7a2b35d4: Status 404 returned error can't find the container with id 5f54a4e61007c243786f741ca1ee08b84fe6bfab6347a266e5c29c1c7a2b35d4
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.071898 4693 generic.go:334] "Generic (PLEG): container finished" podID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerID="599b5dcfac2b3422b420c4ef551f6e1a96126b45504671612e436051b6e2c40d" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.072000 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerDied","Data":"599b5dcfac2b3422b420c4ef551f6e1a96126b45504671612e436051b6e2c40d"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.074191 4693 generic.go:334] "Generic (PLEG): container finished" podID="7684c092-86da-49ca-97d4-dfcebb032adf" containerID="2448ed9afe7eb1f6c1123da1d8042dcb92fd5da4e1e4cd5ee1d6ae7e4159f4c0" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.074441 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerDied","Data":"2448ed9afe7eb1f6c1123da1d8042dcb92fd5da4e1e4cd5ee1d6ae7e4159f4c0"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.077203 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerStarted","Data":"04620cdd1ccaf8754a801685f75a4747038c88c82d60376d2dab217b05a1a2e5"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.079287 4693 generic.go:334] "Generic (PLEG): container finished" podID="79fc98da-aee2-436c-823b-5e608446dc29" containerID="dd2ca2c50994bb99cd3b09863e6d342d7d1baa89e5b2259214e17904ce088cb0" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.079348 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerDied","Data":"dd2ca2c50994bb99cd3b09863e6d342d7d1baa89e5b2259214e17904ce088cb0"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.082239 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerID="9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.082320 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerDied","Data":"9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.089633 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerStarted","Data":"a527b8c3be6b449669f0e23a16c5b3b44061d54e9657d4825de1f12c0fde08b3"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.092583 4693 generic.go:334] "Generic (PLEG): container finished" podID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerID="44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.092654 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerDied","Data":"44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.093577 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-t4blm" event={"ID":"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833","Type":"ContainerStarted","Data":"5f54a4e61007c243786f741ca1ee08b84fe6bfab6347a266e5c29c1c7a2b35d4"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.095577 4693 generic.go:334] "Generic (PLEG): container finished" podID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerID="f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2" exitCode=0
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.095612 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerDied","Data":"f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2"}
Nov 22 09:06:07 crc kubenswrapper[4693]: I1122 09:06:07.702392 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"]
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.101496 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerStarted","Data":"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.104651 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-t4blm" event={"ID":"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833","Type":"ContainerStarted","Data":"ae94eb3da57e8ae478e815e58e5a10183788a1f6963e3d733ffcd9c3ddb302b7"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.104676 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-t4blm" event={"ID":"fbc29b7c-60ea-4d65-ae31-fee4b8e7f833","Type":"ContainerStarted","Data":"9f17b64f848e4cba36980b94dd09df5440684683e5b87a6afe886127a38a45a7"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.106628 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerStarted","Data":"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.108528 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerStarted","Data":"ef7f3d2efdf6c7739853c586ee572b93f1cd51bb87a76f921e267e2dd8a01199"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.110392 4693 generic.go:334] "Generic (PLEG): container finished" podID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerID="a527b8c3be6b449669f0e23a16c5b3b44061d54e9657d4825de1f12c0fde08b3" exitCode=0
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.110433 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerDied","Data":"a527b8c3be6b449669f0e23a16c5b3b44061d54e9657d4825de1f12c0fde08b3"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.112709 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerStarted","Data":"63d63e46965040e4e07700997d880009025844d8f89e6af39e76d223f6e8c6ee"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.116793 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-rssjc" podStartSLOduration=2.277545426 podStartE2EDuration="23.116777102s" podCreationTimestamp="2025-11-22 09:05:45 +0000 UTC" firstStartedPulling="2025-11-22 09:05:46.805551948 +0000 UTC m=+142.948054239" lastFinishedPulling="2025-11-22 09:06:07.644783624 +0000 UTC m=+163.787285915" observedRunningTime="2025-11-22 09:06:08.115139003 +0000 UTC m=+164.257641294" watchObservedRunningTime="2025-11-22 09:06:08.116777102 +0000 UTC m=+164.259279393"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.119003 4693 generic.go:334] "Generic (PLEG): container finished" podID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerID="04620cdd1ccaf8754a801685f75a4747038c88c82d60376d2dab217b05a1a2e5" exitCode=0
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.119048 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerDied","Data":"04620cdd1ccaf8754a801685f75a4747038c88c82d60376d2dab217b05a1a2e5"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.121501 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerStarted","Data":"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.123219 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerStarted","Data":"f03b110816ac8d48273655d210ab8d84a12521a76e90bf968bb5f12c5c33d678"}
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.133461 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4rbw9" podStartSLOduration=2.407055848 podStartE2EDuration="21.133451744s" podCreationTimestamp="2025-11-22 09:05:47 +0000 UTC" firstStartedPulling="2025-11-22 09:05:48.861656545 +0000 UTC m=+145.004158836" lastFinishedPulling="2025-11-22 09:06:07.588052441 +0000 UTC m=+163.730554732" observedRunningTime="2025-11-22 09:06:08.131013689 +0000 UTC m=+164.273515980" watchObservedRunningTime="2025-11-22 09:06:08.133451744 +0000 UTC m=+164.275954034"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.183395 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-l4snx" podStartSLOduration=2.332392578 podStartE2EDuration="23.18338202s" podCreationTimestamp="2025-11-22 09:05:45 +0000 UTC" firstStartedPulling="2025-11-22 09:05:46.793899159 +0000 UTC m=+142.936401450" lastFinishedPulling="2025-11-22 09:06:07.6448886 +0000 UTC m=+163.787390892" observedRunningTime="2025-11-22 09:06:08.182240765 +0000 UTC m=+164.324743056" watchObservedRunningTime="2025-11-22 09:06:08.18338202 +0000 UTC m=+164.325884310"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.183915 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-t4blm" podStartSLOduration=147.183909792 podStartE2EDuration="2m27.183909792s" podCreationTimestamp="2025-11-22 09:03:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:08.148944412 +0000 UTC m=+164.291446703" watchObservedRunningTime="2025-11-22 09:06:08.183909792 +0000 UTC m=+164.326412083"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.207391 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-f6rdl" podStartSLOduration=2.2719356299999998 podStartE2EDuration="23.207378046s" podCreationTimestamp="2025-11-22 09:05:45 +0000 UTC" firstStartedPulling="2025-11-22 09:05:46.798320642 +0000 UTC m=+142.940822933" lastFinishedPulling="2025-11-22 09:06:07.733763058 +0000 UTC m=+163.876265349" observedRunningTime="2025-11-22 09:06:08.205461453 +0000 UTC m=+164.347963743" watchObservedRunningTime="2025-11-22 09:06:08.207378046 +0000 UTC m=+164.349880337"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.219523 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-hwwtg" podStartSLOduration=2.402507776 podStartE2EDuration="23.219511429s" podCreationTimestamp="2025-11-22 09:05:45 +0000 UTC" firstStartedPulling="2025-11-22 09:05:46.794622219 +0000 UTC m=+142.937124510" lastFinishedPulling="2025-11-22 09:06:07.611625872 +0000 UTC m=+163.754128163" observedRunningTime="2025-11-22 09:06:08.218574116 +0000 UTC m=+164.361076407" watchObservedRunningTime="2025-11-22 09:06:08.219511429 +0000 UTC m=+164.362013719"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.235632 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-2d6dr" podStartSLOduration=2.533449359 podStartE2EDuration="21.235609155s" podCreationTimestamp="2025-11-22 09:05:47 +0000 UTC" firstStartedPulling="2025-11-22 09:05:48.858556126 +0000 UTC m=+145.001058418" lastFinishedPulling="2025-11-22 09:06:07.560715923 +0000 UTC m=+163.703218214" observedRunningTime="2025-11-22 09:06:08.232078487 +0000 UTC m=+164.374580768" watchObservedRunningTime="2025-11-22 09:06:08.235609155 +0000 UTC m=+164.378111446"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.254974 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-2d6dr"
Nov 22 09:06:08 crc kubenswrapper[4693]: I1122 09:06:08.255017 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-2d6dr"
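The pod_startup_latency_tracker records above encode a simple relation that their own numbers confirm: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E figure minus the image-pull window (lastFinishedPulling minus firstStartedPulling). A short Go sketch checking this against the certified-operators-rssjc record; the parsing layout is an assumption about the printed format, and the " m=+..." monotonic suffix is dropped before parsing:

package main

import (
	"fmt"
	"time"
)

const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	// Timestamps copied from the certified-operators-rssjc record above.
	created := mustParse("2025-11-22 09:05:45 +0000 UTC")
	firstPull := mustParse("2025-11-22 09:05:46.805551948 +0000 UTC")
	lastPull := mustParse("2025-11-22 09:06:07.644783624 +0000 UTC")
	observed := mustParse("2025-11-22 09:06:08.116777102 +0000 UTC")

	e2e := observed.Sub(created)         // podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // podStartSLOduration excludes pull time

	fmt.Println(e2e) // 23.116777102s, matching the logged podStartE2EDuration
	fmt.Println(slo) // 2.277545426s, matching the logged podStartSLOduration
}

The same arithmetic reproduces the redhat-marketplace-4rbw9 figures (21.133451744s and 2.407055848s), so the SLO duration is simply startup time with image pulls excluded.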
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.130593 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerStarted","Data":"8cc299e6b69b80d469d1f6731364d678cc4f2df27e9091f23312efb577abbf9c"}
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.134086 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerStarted","Data":"1279d1d415832fd3b23f5faec78b2dc0eacabdc326137fa3dde6539ad014010f"}
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.148926 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-6nvwr" podStartSLOduration=7.527430368 podStartE2EDuration="21.148911331s" podCreationTimestamp="2025-11-22 09:05:48 +0000 UTC" firstStartedPulling="2025-11-22 09:05:54.958797483 +0000 UTC m=+151.101299773" lastFinishedPulling="2025-11-22 09:06:08.580278445 +0000 UTC m=+164.722780736" observedRunningTime="2025-11-22 09:06:09.147434394 +0000 UTC m=+165.289936685" watchObservedRunningTime="2025-11-22 09:06:09.148911331 +0000 UTC m=+165.291413623"
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.161299 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-4mxcr" podStartSLOduration=2.250690308 podStartE2EDuration="21.161290485s" podCreationTimestamp="2025-11-22 09:05:48 +0000 UTC" firstStartedPulling="2025-11-22 09:05:49.904750259 +0000 UTC m=+146.047252550" lastFinishedPulling="2025-11-22 09:06:08.815350436 +0000 UTC m=+164.957852727" observedRunningTime="2025-11-22 09:06:09.160915741 +0000 UTC m=+165.303418032" watchObservedRunningTime="2025-11-22 09:06:09.161290485 +0000 UTC m=+165.303792777"
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.255979 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-6nvwr"
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.256028 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-6nvwr"
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.367999 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-2d6dr" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="registry-server" probeResult="failure" output=<
Nov 22 09:06:09 crc kubenswrapper[4693]: timeout: failed to connect service ":50051" within 1s
Nov 22 09:06:09 crc kubenswrapper[4693]: >
Nov 22 09:06:09 crc kubenswrapper[4693]: I1122 09:06:09.732586 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-tfptk"
Nov 22 09:06:10 crc kubenswrapper[4693]: I1122 09:06:10.292107 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-6nvwr" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="registry-server" probeResult="failure" output=<
Nov 22 09:06:10 crc kubenswrapper[4693]: timeout: failed to connect service ":50051" within 1s
Nov 22 09:06:10 crc kubenswrapper[4693]: >
Nov 22 09:06:11 crc kubenswrapper[4693]: I1122 09:06:11.362408 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:06:11 crc kubenswrapper[4693]: I1122 09:06:11.365165 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-m5ldq"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.677105 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-f6rdl"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.677159 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-f6rdl"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.712860 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-f6rdl"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.897467 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.897494 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:06:15 crc kubenswrapper[4693]: I1122 09:06:15.923226 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.073379 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.073418 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.101036 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.204756 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-hwwtg"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.204829 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.211482 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-f6rdl"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.305059 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.305099 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:16 crc kubenswrapper[4693]: I1122 09:06:16.329987 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:17 crc kubenswrapper[4693]: I1122 09:06:17.209396 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:17 crc kubenswrapper[4693]: I1122 09:06:17.845101 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4rbw9"
Nov 22 09:06:17 crc kubenswrapper[4693]: I1122 09:06:17.845152 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4rbw9"
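The marketplace registry-server pods above fail their startup probes with "timeout: failed to connect service \":50051\" within 1s", the message printed when a gRPC health check cannot reach the catalog's port 50051 in time. A hedged Go sketch of such a check using the standard grpc.health.v1 protocol; the address and the use of grpc.NewClient (grpc-go v1.63+; older versions use grpc.Dial) are assumptions, not the exact probe binary these pods run:

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// check performs one gRPC health check against a registry-style
// endpoint. The 1s budget mirrors the "within 1s" in the log.
func check(addr string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	conn, err := grpc.NewClient(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within 1s: %w", addr, err)
	}
	if resp.GetStatus() != healthpb.HealthCheckResponse_SERVING {
		return fmt.Errorf("service not serving: %v", resp.GetStatus())
	}
	return nil
}

func main() {
	if err := check("localhost:50051"); err != nil {
		fmt.Println(err)
	}
}

The probes flip from "unhealthy" to "started" to "ready" over the following seconds once each catalog finishes loading and begins serving on :50051.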
Nov 22 09:06:17 crc kubenswrapper[4693]: I1122 09:06:17.871920 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4rbw9"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.209371 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4rbw9"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.279811 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-2d6dr"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.299820 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4snx"]
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.305290 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-2d6dr"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.495384 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rssjc"]
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.495759 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-rssjc" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="registry-server" containerID="cri-o://6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac" gracePeriod=2
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.857963 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.907682 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-4mxcr"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.907911 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-4mxcr"
Nov 22 09:06:18 crc kubenswrapper[4693]: I1122 09:06:18.941201 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-4mxcr"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.038929 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content\") pod \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.038976 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities\") pod \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.038999 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgvpb\" (UniqueName: \"kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb\") pod \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\" (UID: \"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.039530 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities" (OuterVolumeSpecName: "utilities") pod "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" (UID: "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.042990 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb" (OuterVolumeSpecName: "kube-api-access-fgvpb") pod "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" (UID: "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e"). InnerVolumeSpecName "kube-api-access-fgvpb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.071766 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" (UID: "7f7b3933-d7f1-43d6-973c-1b8e0a774c5e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.140114 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.140143 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.140153 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgvpb\" (UniqueName: \"kubernetes.io/projected/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e-kube-api-access-fgvpb\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.192818 4693 generic.go:334] "Generic (PLEG): container finished" podID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerID="6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac" exitCode=0
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.192877 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerDied","Data":"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"}
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.192898 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-rssjc"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.192923 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-rssjc" event={"ID":"7f7b3933-d7f1-43d6-973c-1b8e0a774c5e","Type":"ContainerDied","Data":"b198cd54b4ea20661c797ae7b09aa2b6d878d3a54f6e783a4d1e3f16c3628a9f"}
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.192943 4693 scope.go:117] "RemoveContainer" containerID="6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.193732 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-l4snx" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="registry-server" containerID="cri-o://6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9" gracePeriod=2
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.204580 4693 scope.go:117] "RemoveContainer" containerID="44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.214219 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-rssjc"]
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.216167 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-rssjc"]
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.220035 4693 scope.go:117] "RemoveContainer" containerID="993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.221160 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-4mxcr"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.282890 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-6nvwr"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.294911 4693 scope.go:117] "RemoveContainer" containerID="6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"
Nov 22 09:06:19 crc kubenswrapper[4693]: E1122 09:06:19.295287 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac\": container with ID starting with 6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac not found: ID does not exist" containerID="6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.295316 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac"} err="failed to get container status \"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac\": rpc error: code = NotFound desc = could not find container \"6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac\": container with ID starting with 6053a0cff68491d6262bbfe556ccc71ee73aab40790064cf6f01dd049f105dac not found: ID does not exist"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.295337 4693 scope.go:117] "RemoveContainer" containerID="44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e"
Nov 22 09:06:19 crc kubenswrapper[4693]: E1122 09:06:19.295699 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e\": container with ID starting with 44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e not found: ID does not exist" containerID="44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.295720 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e"} err="failed to get container status \"44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e\": rpc error: code = NotFound desc = could not find container \"44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e\": container with ID starting with 44a01f2bdc05777996857857360bc14659d88b7308a26b99399a8cd9f52e292e not found: ID does not exist"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.295732 4693 scope.go:117] "RemoveContainer" containerID="993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7"
Nov 22 09:06:19 crc kubenswrapper[4693]: E1122 09:06:19.296011 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7\": container with ID starting with 993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7 not found: ID does not exist" containerID="993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.296032 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7"} err="failed to get container status \"993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7\": rpc error: code = NotFound desc = could not find container \"993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7\": container with ID starting with 993dc138eccb017a85efed7ee39e29f042f92c6163820ac4dde90bb14a1e84b7 not found: ID does not exist"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.313985 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-6nvwr"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.562489 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.647928 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content\") pod \"9ed69228-6c61-4355-89a3-0f4f13306faf\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.648012 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities\") pod \"9ed69228-6c61-4355-89a3-0f4f13306faf\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.648044 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-srrw7\" (UniqueName: \"kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7\") pod \"9ed69228-6c61-4355-89a3-0f4f13306faf\" (UID: \"9ed69228-6c61-4355-89a3-0f4f13306faf\") "
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.648560 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities" (OuterVolumeSpecName: "utilities") pod "9ed69228-6c61-4355-89a3-0f4f13306faf" (UID: "9ed69228-6c61-4355-89a3-0f4f13306faf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.650444 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7" (OuterVolumeSpecName: "kube-api-access-srrw7") pod "9ed69228-6c61-4355-89a3-0f4f13306faf" (UID: "9ed69228-6c61-4355-89a3-0f4f13306faf"). InnerVolumeSpecName "kube-api-access-srrw7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.684778 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9ed69228-6c61-4355-89a3-0f4f13306faf" (UID: "9ed69228-6c61-4355-89a3-0f4f13306faf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.749694 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.749719 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-srrw7\" (UniqueName: \"kubernetes.io/projected/9ed69228-6c61-4355-89a3-0f4f13306faf-kube-api-access-srrw7\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:19 crc kubenswrapper[4693]: I1122 09:06:19.749729 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9ed69228-6c61-4355-89a3-0f4f13306faf-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.151574 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" path="/var/lib/kubelet/pods/7f7b3933-d7f1-43d6-973c-1b8e0a774c5e/volumes"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.198146 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerID="6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9" exitCode=0
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.198212 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerDied","Data":"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"}
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.198236 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-l4snx" event={"ID":"9ed69228-6c61-4355-89a3-0f4f13306faf","Type":"ContainerDied","Data":"ac9f39a3f0c958b1a4af759823bf1137bd9bd4dfe911c887034858e6fa95e6c9"}
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.198252 4693 scope.go:117] "RemoveContainer" containerID="6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.198415 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-l4snx"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.209953 4693 scope.go:117] "RemoveContainer" containerID="9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.210725 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-l4snx"]
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.213093 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-l4snx"]
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.220976 4693 scope.go:117] "RemoveContainer" containerID="0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.232033 4693 scope.go:117] "RemoveContainer" containerID="6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"
Nov 22 09:06:20 crc kubenswrapper[4693]: E1122 09:06:20.232331 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9\": container with ID starting with 6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9 not found: ID does not exist" containerID="6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.232360 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9"} err="failed to get container status \"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9\": rpc error: code = NotFound desc = could not find container \"6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9\": container with ID starting with 6594df59437e025a56ea1b2a8513314678556eda744026de04b7fef111ce5cd9 not found: ID does not exist"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.232377 4693 scope.go:117] "RemoveContainer" containerID="9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c"
Nov 22 09:06:20 crc kubenswrapper[4693]: E1122 09:06:20.232706 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c\": container with ID starting with 9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c not found: ID does not exist" containerID="9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.232730 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c"} err="failed to get container status \"9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c\": rpc error: code = NotFound desc = could not find container \"9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c\": container with ID starting with 9b35044966795ee22a5d8b3b26a6baf9d4a6188bdaf1c054125921b7a8afcf7c not found: ID does not exist"
Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.232765 4693 scope.go:117] "RemoveContainer" containerID="0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71"
Nov 22 09:06:20 crc kubenswrapper[4693]: E1122 09:06:20.233150 4693 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71\": container with ID starting with 0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71 not found: ID does not exist" containerID="0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71" Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.233183 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71"} err="failed to get container status \"0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71\": rpc error: code = NotFound desc = could not find container \"0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71\": container with ID starting with 0f6938df465d8bdeaa7242d5bfe0132bab65287b17063d0a73c0de3a753d5b71 not found: ID does not exist" Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.694450 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:06:20 crc kubenswrapper[4693]: I1122 09:06:20.694634 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-2d6dr" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="registry-server" containerID="cri-o://853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7" gracePeriod=2 Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.058788 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.163828 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mq5rn\" (UniqueName: \"kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn\") pod \"a802447d-af97-46d8-8f36-1c342ddb4a4f\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.163893 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities\") pod \"a802447d-af97-46d8-8f36-1c342ddb4a4f\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.163949 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content\") pod \"a802447d-af97-46d8-8f36-1c342ddb4a4f\" (UID: \"a802447d-af97-46d8-8f36-1c342ddb4a4f\") " Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.164536 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities" (OuterVolumeSpecName: "utilities") pod "a802447d-af97-46d8-8f36-1c342ddb4a4f" (UID: "a802447d-af97-46d8-8f36-1c342ddb4a4f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.167306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn" (OuterVolumeSpecName: "kube-api-access-mq5rn") pod "a802447d-af97-46d8-8f36-1c342ddb4a4f" (UID: "a802447d-af97-46d8-8f36-1c342ddb4a4f"). 
InnerVolumeSpecName "kube-api-access-mq5rn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.178390 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a802447d-af97-46d8-8f36-1c342ddb4a4f" (UID: "a802447d-af97-46d8-8f36-1c342ddb4a4f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.206292 4693 generic.go:334] "Generic (PLEG): container finished" podID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerID="853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7" exitCode=0 Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.206350 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-2d6dr" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.206357 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerDied","Data":"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7"} Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.206384 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-2d6dr" event={"ID":"a802447d-af97-46d8-8f36-1c342ddb4a4f","Type":"ContainerDied","Data":"85dccfdea3a7cd44757f074b4ae0ed93d3f6f29554bdbed17e0b07c3ba49a82e"} Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.206399 4693 scope.go:117] "RemoveContainer" containerID="853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.219980 4693 scope.go:117] "RemoveContainer" containerID="f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.233521 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.237900 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-2d6dr"] Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.251460 4693 scope.go:117] "RemoveContainer" containerID="09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.263547 4693 scope.go:117] "RemoveContainer" containerID="853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7" Nov 22 09:06:21 crc kubenswrapper[4693]: E1122 09:06:21.264262 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7\": container with ID starting with 853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7 not found: ID does not exist" containerID="853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264303 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7"} err="failed to get container status \"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7\": rpc error: code = NotFound desc = could not find 
container \"853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7\": container with ID starting with 853a811bf7ecb3da42205640060f2c6dbe9274ff455a872c909b84658379e3e7 not found: ID does not exist" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264343 4693 scope.go:117] "RemoveContainer" containerID="f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2" Nov 22 09:06:21 crc kubenswrapper[4693]: E1122 09:06:21.264900 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2\": container with ID starting with f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2 not found: ID does not exist" containerID="f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264927 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2"} err="failed to get container status \"f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2\": rpc error: code = NotFound desc = could not find container \"f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2\": container with ID starting with f27a1a7f04ba6e46bc85e6940a06a62c176549ed1e8c30c253c4245d928229c2 not found: ID does not exist" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264941 4693 scope.go:117] "RemoveContainer" containerID="09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264963 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mq5rn\" (UniqueName: \"kubernetes.io/projected/a802447d-af97-46d8-8f36-1c342ddb4a4f-kube-api-access-mq5rn\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264981 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.264991 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a802447d-af97-46d8-8f36-1c342ddb4a4f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:21 crc kubenswrapper[4693]: E1122 09:06:21.265222 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e\": container with ID starting with 09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e not found: ID does not exist" containerID="09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e" Nov 22 09:06:21 crc kubenswrapper[4693]: I1122 09:06:21.265254 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e"} err="failed to get container status \"09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e\": rpc error: code = NotFound desc = could not find container \"09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e\": container with ID starting with 09c84ed1fc2150b2ff6068689325d5af4e22d7d761aa29b3e3975892f1b2326e not found: ID does not exist" Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.152240 
4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" path="/var/lib/kubelet/pods/9ed69228-6c61-4355-89a3-0f4f13306faf/volumes" Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.152773 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" path="/var/lib/kubelet/pods/a802447d-af97-46d8-8f36-1c342ddb4a4f/volumes" Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.636071 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"] Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.636235 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" podUID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" containerName="controller-manager" containerID="cri-o://2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5" gracePeriod=30 Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.733268 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"] Nov 22 09:06:22 crc kubenswrapper[4693]: I1122 09:06:22.733432 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" podUID="c9649ff6-4471-48d9-b751-56ac85bd9c91" containerName="route-controller-manager" containerID="cri-o://e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3" gracePeriod=30 Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.080277 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.095929 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.096159 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-6nvwr" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="registry-server" containerID="cri-o://8cc299e6b69b80d469d1f6731364d678cc4f2df27e9091f23312efb577abbf9c" gracePeriod=2 Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.180674 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8czr\" (UniqueName: \"kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr\") pod \"c9649ff6-4471-48d9-b751-56ac85bd9c91\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.180733 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca\") pod \"c9649ff6-4471-48d9-b751-56ac85bd9c91\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.180789 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config\") pod \"c9649ff6-4471-48d9-b751-56ac85bd9c91\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.180809 4693 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert\") pod \"c9649ff6-4471-48d9-b751-56ac85bd9c91\" (UID: \"c9649ff6-4471-48d9-b751-56ac85bd9c91\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.181267 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca" (OuterVolumeSpecName: "client-ca") pod "c9649ff6-4471-48d9-b751-56ac85bd9c91" (UID: "c9649ff6-4471-48d9-b751-56ac85bd9c91"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.181361 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config" (OuterVolumeSpecName: "config") pod "c9649ff6-4471-48d9-b751-56ac85bd9c91" (UID: "c9649ff6-4471-48d9-b751-56ac85bd9c91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.185544 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "c9649ff6-4471-48d9-b751-56ac85bd9c91" (UID: "c9649ff6-4471-48d9-b751-56ac85bd9c91"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.185553 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr" (OuterVolumeSpecName: "kube-api-access-z8czr") pod "c9649ff6-4471-48d9-b751-56ac85bd9c91" (UID: "c9649ff6-4471-48d9-b751-56ac85bd9c91"). InnerVolumeSpecName "kube-api-access-z8czr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.195478 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.222346 4693 generic.go:334] "Generic (PLEG): container finished" podID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" containerID="2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5" exitCode=0 Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.222424 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" event={"ID":"5f7f9670-71e1-4be0-955b-7fdf8c953c20","Type":"ContainerDied","Data":"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5"} Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.222466 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" event={"ID":"5f7f9670-71e1-4be0-955b-7fdf8c953c20","Type":"ContainerDied","Data":"ac4ecd2e0ffe9d8b444cfa3cb70d41eac82d1f5326fe9533584d860c0ec73b08"} Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.222483 4693 scope.go:117] "RemoveContainer" containerID="2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.222567 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-4ck6n" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.228109 4693 generic.go:334] "Generic (PLEG): container finished" podID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerID="8cc299e6b69b80d469d1f6731364d678cc4f2df27e9091f23312efb577abbf9c" exitCode=0 Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.228197 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerDied","Data":"8cc299e6b69b80d469d1f6731364d678cc4f2df27e9091f23312efb577abbf9c"} Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.229459 4693 generic.go:334] "Generic (PLEG): container finished" podID="c9649ff6-4471-48d9-b751-56ac85bd9c91" containerID="e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3" exitCode=0 Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.229496 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" event={"ID":"c9649ff6-4471-48d9-b751-56ac85bd9c91","Type":"ContainerDied","Data":"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3"} Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.229519 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" event={"ID":"c9649ff6-4471-48d9-b751-56ac85bd9c91","Type":"ContainerDied","Data":"53a0330b188e3e7654370757e8c67a79dec1dd65fae989d6dae409f943241bd1"} Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.229532 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.240720 4693 scope.go:117] "RemoveContainer" containerID="2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.241241 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5\": container with ID starting with 2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5 not found: ID does not exist" containerID="2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.241265 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5"} err="failed to get container status \"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5\": rpc error: code = NotFound desc = could not find container \"2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5\": container with ID starting with 2cbce10a93301e97e9e6d70337cd614e97b12f30462d0f90b1f3d2b2a118a0b5 not found: ID does not exist" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.241284 4693 scope.go:117] "RemoveContainer" containerID="e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.252932 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.253994 4693 scope.go:117] 
"RemoveContainer" containerID="e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.254462 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3\": container with ID starting with e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3 not found: ID does not exist" containerID="e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.254548 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3"} err="failed to get container status \"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3\": rpc error: code = NotFound desc = could not find container \"e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3\": container with ID starting with e4471aed03cb44d8ecaf84874a5bbb07ea78108749f9a847a9b55d9007675ea3 not found: ID does not exist" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.255023 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-dd9wx"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283609 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqtsz\" (UniqueName: \"kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz\") pod \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283693 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca\") pod \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283733 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config\") pod \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283750 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles\") pod \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283772 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert\") pod \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\" (UID: \"5f7f9670-71e1-4be0-955b-7fdf8c953c20\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283980 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8czr\" (UniqueName: \"kubernetes.io/projected/c9649ff6-4471-48d9-b751-56ac85bd9c91-kube-api-access-z8czr\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.283992 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.284001 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c9649ff6-4471-48d9-b751-56ac85bd9c91-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.284009 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c9649ff6-4471-48d9-b751-56ac85bd9c91-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.284487 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca" (OuterVolumeSpecName: "client-ca") pod "5f7f9670-71e1-4be0-955b-7fdf8c953c20" (UID: "5f7f9670-71e1-4be0-955b-7fdf8c953c20"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.284497 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "5f7f9670-71e1-4be0-955b-7fdf8c953c20" (UID: "5f7f9670-71e1-4be0-955b-7fdf8c953c20"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.284535 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config" (OuterVolumeSpecName: "config") pod "5f7f9670-71e1-4be0-955b-7fdf8c953c20" (UID: "5f7f9670-71e1-4be0-955b-7fdf8c953c20"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.286352 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz" (OuterVolumeSpecName: "kube-api-access-mqtsz") pod "5f7f9670-71e1-4be0-955b-7fdf8c953c20" (UID: "5f7f9670-71e1-4be0-955b-7fdf8c953c20"). InnerVolumeSpecName "kube-api-access-mqtsz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.287113 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5f7f9670-71e1-4be0-955b-7fdf8c953c20" (UID: "5f7f9670-71e1-4be0-955b-7fdf8c953c20"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.382517 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.384406 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqtsz\" (UniqueName: \"kubernetes.io/projected/5f7f9670-71e1-4be0-955b-7fdf8c953c20-kube-api-access-mqtsz\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.384432 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.384443 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.384452 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5f7f9670-71e1-4be0-955b-7fdf8c953c20-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.384459 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5f7f9670-71e1-4be0-955b-7fdf8c953c20-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.485300 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities\") pod \"47e432ee-9866-4bab-b0d6-21183cc4698f\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.485346 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-997gk\" (UniqueName: \"kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk\") pod \"47e432ee-9866-4bab-b0d6-21183cc4698f\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.485430 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content\") pod \"47e432ee-9866-4bab-b0d6-21183cc4698f\" (UID: \"47e432ee-9866-4bab-b0d6-21183cc4698f\") " Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.485881 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities" (OuterVolumeSpecName: "utilities") pod "47e432ee-9866-4bab-b0d6-21183cc4698f" (UID: "47e432ee-9866-4bab-b0d6-21183cc4698f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.488235 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk" (OuterVolumeSpecName: "kube-api-access-997gk") pod "47e432ee-9866-4bab-b0d6-21183cc4698f" (UID: "47e432ee-9866-4bab-b0d6-21183cc4698f"). InnerVolumeSpecName "kube-api-access-997gk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.541058 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.541089 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-4ck6n"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.553756 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "47e432ee-9866-4bab-b0d6-21183cc4698f" (UID: "47e432ee-9866-4bab-b0d6-21183cc4698f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.586534 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.586561 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-997gk\" (UniqueName: \"kubernetes.io/projected/47e432ee-9866-4bab-b0d6-21183cc4698f-kube-api-access-997gk\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.586571 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/47e432ee-9866-4bab-b0d6-21183cc4698f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714632 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"] Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714792 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714808 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714819 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e05669cb-7617-425d-bda6-bde1bb48fee1" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714825 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e05669cb-7617-425d-bda6-bde1bb48fee1" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714833 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714851 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714859 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714864 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714873 4693 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714878 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714886 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714891 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714897 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714902 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714910 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714916 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714922 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714927 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="extract-utilities" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714935 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714940 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714947 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9649ff6-4471-48d9-b751-56ac85bd9c91" containerName="route-controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714953 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9649ff6-4471-48d9-b751-56ac85bd9c91" containerName="route-controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714960 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" containerName="controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714965 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" containerName="controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714973 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714978 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714985 
4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.714990 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.714997 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715002 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="extract-content" Nov 22 09:06:23 crc kubenswrapper[4693]: E1122 09:06:23.715009 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715016 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715086 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e05669cb-7617-425d-bda6-bde1bb48fee1" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715094 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9649ff6-4471-48d9-b751-56ac85bd9c91" containerName="route-controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715102 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f7b3933-d7f1-43d6-973c-1b8e0a774c5e" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715108 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ed69228-6c61-4355-89a3-0f4f13306faf" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715114 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" containerName="controller-manager" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715122 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715128 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a802447d-af97-46d8-8f36-1c342ddb4a4f" containerName="registry-server" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715134 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="51b3cdb1-79b5-44d5-98ef-b6cc072a8dde" containerName="pruner" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.715418 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.718344 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.718470 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.718499 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.718827 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.719656 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.719877 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.720835 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"] Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.723217 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.788323 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.788371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.788395 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.788427 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.788479 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8wmbv\" (UniqueName: 
\"kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.889949 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.889997 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.890015 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.890036 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.890054 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8wmbv\" (UniqueName: \"kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.891482 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.892555 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.892672 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " 
pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.893727 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:23 crc kubenswrapper[4693]: I1122 09:06:23.906482 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8wmbv\" (UniqueName: \"kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv\") pod \"controller-manager-64d758d556-mz4w8\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") " pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.037254 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.155558 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f7f9670-71e1-4be0-955b-7fdf8c953c20" path="/var/lib/kubelet/pods/5f7f9670-71e1-4be0-955b-7fdf8c953c20/volumes" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.156225 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9649ff6-4471-48d9-b751-56ac85bd9c91" path="/var/lib/kubelet/pods/c9649ff6-4471-48d9-b751-56ac85bd9c91/volumes" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.240550 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-6nvwr" event={"ID":"47e432ee-9866-4bab-b0d6-21183cc4698f","Type":"ContainerDied","Data":"c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8"} Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.240767 4693 scope.go:117] "RemoveContainer" containerID="8cc299e6b69b80d469d1f6731364d678cc4f2df27e9091f23312efb577abbf9c" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.240912 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-6nvwr" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.260133 4693 scope.go:117] "RemoveContainer" containerID="a527b8c3be6b449669f0e23a16c5b3b44061d54e9657d4825de1f12c0fde08b3" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.261869 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.269077 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-6nvwr"] Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.278409 4693 scope.go:117] "RemoveContainer" containerID="27368cbe46215577cf379ab04c68b57efa59c4767ee63334dcbc6cdb7c849281" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.413330 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"] Nov 22 09:06:24 crc kubenswrapper[4693]: W1122 09:06:24.419040 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1dfd539_c463_47e6_83a3_31ee0a6e8202.slice/crio-875d1cd6c96c47f9a1eac39eb63faa707698c93aa585d06af03a637c39727ed0 WatchSource:0}: Error finding container 875d1cd6c96c47f9a1eac39eb63faa707698c93aa585d06af03a637c39727ed0: Status 404 returned error can't find the container with id 875d1cd6c96c47f9a1eac39eb63faa707698c93aa585d06af03a637c39727ed0 Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.717235 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"] Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.717762 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.719186 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.719781 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.719811 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.721594 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.721956 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.723787 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.732191 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"] Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.798511 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gkk94\" (UniqueName: \"kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.798747 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.798837 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.798989 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.900496 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gkk94\" (UniqueName: \"kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94\") pod 
\"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.900755 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.900864 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.900988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.901707 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.901863 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.905416 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:24 crc kubenswrapper[4693]: I1122 09:06:24.916261 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gkk94\" (UniqueName: \"kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94\") pod \"route-controller-manager-7d5b594984-zbwcc\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") " pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.028934 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:25 crc kubenswrapper[4693]: E1122 09:06:25.148264 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]" Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.246361 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" event={"ID":"a1dfd539-c463-47e6-83a3-31ee0a6e8202","Type":"ContainerStarted","Data":"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902"} Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.246627 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.246650 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" event={"ID":"a1dfd539-c463-47e6-83a3-31ee0a6e8202","Type":"ContainerStarted","Data":"875d1cd6c96c47f9a1eac39eb63faa707698c93aa585d06af03a637c39727ed0"} Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.250464 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.263039 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" podStartSLOduration=3.263024899 podStartE2EDuration="3.263024899s" podCreationTimestamp="2025-11-22 09:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:25.259591885 +0000 UTC m=+181.402094176" watchObservedRunningTime="2025-11-22 09:06:25.263024899 +0000 UTC m=+181.405527190" Nov 22 09:06:25 crc kubenswrapper[4693]: I1122 09:06:25.367077 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"] Nov 22 09:06:25 crc kubenswrapper[4693]: W1122 09:06:25.378368 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd515f9c1_b7dc_41d8_ba9d_3bd6d1c8c6a3.slice/crio-03593ed1119a16a42f8e580d6f6306640ae506ea0abe1a20b229238d7d099964 WatchSource:0}: Error finding container 03593ed1119a16a42f8e580d6f6306640ae506ea0abe1a20b229238d7d099964: Status 404 returned error can't find the container with id 03593ed1119a16a42f8e580d6f6306640ae506ea0abe1a20b229238d7d099964 Nov 22 09:06:26 crc kubenswrapper[4693]: I1122 09:06:26.151633 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47e432ee-9866-4bab-b0d6-21183cc4698f" path="/var/lib/kubelet/pods/47e432ee-9866-4bab-b0d6-21183cc4698f/volumes" Nov 22 09:06:26 crc kubenswrapper[4693]: I1122 09:06:26.251564 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" event={"ID":"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3","Type":"ContainerStarted","Data":"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736"} Nov 22 
09:06:26 crc kubenswrapper[4693]: I1122 09:06:26.251608 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" event={"ID":"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3","Type":"ContainerStarted","Data":"03593ed1119a16a42f8e580d6f6306640ae506ea0abe1a20b229238d7d099964"} Nov 22 09:06:26 crc kubenswrapper[4693]: I1122 09:06:26.266191 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" podStartSLOduration=4.266177317 podStartE2EDuration="4.266177317s" podCreationTimestamp="2025-11-22 09:06:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:26.26339695 +0000 UTC m=+182.405899241" watchObservedRunningTime="2025-11-22 09:06:26.266177317 +0000 UTC m=+182.408679608" Nov 22 09:06:27 crc kubenswrapper[4693]: I1122 09:06:27.255096 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:27 crc kubenswrapper[4693]: I1122 09:06:27.258941 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:30 crc kubenswrapper[4693]: I1122 09:06:30.246089 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:06:30 crc kubenswrapper[4693]: I1122 09:06:30.246482 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:06:30 crc kubenswrapper[4693]: I1122 09:06:30.262395 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 22 09:06:32 crc kubenswrapper[4693]: I1122 09:06:32.725018 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" podUID="38a5280f-5933-40af-9c61-41f4766fc538" containerName="oauth-openshift" containerID="cri-o://64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b" gracePeriod=15 Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.085743 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181460 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181502 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181527 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181547 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181572 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181612 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181655 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsv85\" (UniqueName: \"kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181679 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181693 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") " 
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181710 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") "
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181726 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") "
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181743 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") "
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181772 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") "
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.181789 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir\") pod \"38a5280f-5933-40af-9c61-41f4766fc538\" (UID: \"38a5280f-5933-40af-9c61-41f4766fc538\") "
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182114 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182415 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182427 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182450 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182705 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.182728 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.187306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.187614 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.187925 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85" (OuterVolumeSpecName: "kube-api-access-qsv85") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "kube-api-access-qsv85". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.188814 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.188613 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.189199 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.189396 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.189617 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.189800 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "38a5280f-5933-40af-9c61-41f4766fc538" (UID: "38a5280f-5933-40af-9c61-41f4766fc538"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.277707 4693 generic.go:334] "Generic (PLEG): container finished" podID="38a5280f-5933-40af-9c61-41f4766fc538" containerID="64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b" exitCode=0
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.277745 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" event={"ID":"38a5280f-5933-40af-9c61-41f4766fc538","Type":"ContainerDied","Data":"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"}
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.277770 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-6s452" event={"ID":"38a5280f-5933-40af-9c61-41f4766fc538","Type":"ContainerDied","Data":"5a4edb9ba9d37f1513ec801ff928016f1556803424b28a9c4ad9e02daf6ad81a"}
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.277785 4693 scope.go:117] "RemoveContainer" containerID="64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.277890 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-6s452"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.282914 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsv85\" (UniqueName: \"kubernetes.io/projected/38a5280f-5933-40af-9c61-41f4766fc538-kube-api-access-qsv85\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283006 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283074 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283134 4693 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-audit-policies\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283185 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283243 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283300 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283359 4693 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a5280f-5933-40af-9c61-41f4766fc538-audit-dir\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283433 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283484 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283539 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283591 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.283658 4693 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/38a5280f-5933-40af-9c61-41f4766fc538-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.292030 4693 scope.go:117] "RemoveContainer" containerID="64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"
Nov 22 09:06:33 crc kubenswrapper[4693]: E1122 09:06:33.292340 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b\": container with ID starting with 64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b not found: ID does not exist" containerID="64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.292383 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b"} err="failed to get container status \"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b\": rpc error: code = NotFound desc = could not find container \"64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b\": container with ID starting with 64828601be17f40c592b62328759cd87464b88ab3ce6e8df8abc4dba5296b21b not found: ID does not exist"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.295313 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"]
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.298684 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-6s452"]
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.721186 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"]
Nov 22 09:06:33 crc kubenswrapper[4693]: E1122 09:06:33.721350 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="38a5280f-5933-40af-9c61-41f4766fc538" containerName="oauth-openshift"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.721366 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="38a5280f-5933-40af-9c61-41f4766fc538" containerName="oauth-openshift"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.721450 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="38a5280f-5933-40af-9c61-41f4766fc538" containerName="oauth-openshift"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.721756 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.723892 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724036 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724154 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724225 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724259 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724301 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724369 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724578 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.724715 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.725552 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.725616 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.726202 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.730814 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"]
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.731197 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.732207 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.738833 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.789743 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.789884 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-session\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.789940 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.789967 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790045 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-dir\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790101 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-login\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790165 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790211 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790249 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-error\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790282 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpgdh\" (UniqueName: \"kubernetes.io/projected/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-kube-api-access-lpgdh\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790317 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790345 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.790367 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-policies\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.891974 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-policies\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892012 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892057 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-session\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892080 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892095 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892118 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-dir\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892678 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-policies\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892686 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892746 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892763 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-login\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892797 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892817 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-error\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892832 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lpgdh\" (UniqueName: \"kubernetes.io/projected/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-kube-api-access-lpgdh\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892867 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892886 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892671 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-audit-dir\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.892831 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-service-ca\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.893588 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-cliconfig\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.894021 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.895699 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896006 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-login\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896021 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-template-error\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896056 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-router-certs\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896199 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-session\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896227 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896245 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.896680 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-v4-0-config-system-serving-cert\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:33 crc kubenswrapper[4693]: I1122 09:06:33.905574 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpgdh\" (UniqueName: \"kubernetes.io/projected/7b74c9fb-38c7-4e49-8634-a97f6a3094f2-kube-api-access-lpgdh\") pod \"oauth-openshift-f5cd6b797-xrvd4\" (UID: \"7b74c9fb-38c7-4e49-8634-a97f6a3094f2\") " pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:34 crc kubenswrapper[4693]: I1122 09:06:34.032104 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:34 crc kubenswrapper[4693]: I1122 09:06:34.156046 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38a5280f-5933-40af-9c61-41f4766fc538" path="/var/lib/kubelet/pods/38a5280f-5933-40af-9c61-41f4766fc538/volumes"
Nov 22 09:06:34 crc kubenswrapper[4693]: I1122 09:06:34.394699 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"]
Nov 22 09:06:35 crc kubenswrapper[4693]: E1122 09:06:35.237786 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]"
Nov 22 09:06:35 crc kubenswrapper[4693]: I1122 09:06:35.287537 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:35 crc kubenswrapper[4693]: I1122 09:06:35.287726 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4" event={"ID":"7b74c9fb-38c7-4e49-8634-a97f6a3094f2","Type":"ContainerStarted","Data":"2f4e66c1d72ea3df2a8d67288e444fdf3e9acefa46d9ff0ac070001fa0231f1f"}
Nov 22 09:06:35 crc kubenswrapper[4693]: I1122 09:06:35.287740 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4" event={"ID":"7b74c9fb-38c7-4e49-8634-a97f6a3094f2","Type":"ContainerStarted","Data":"3673a21c0a3bd7666f17ac64b7fb4abf6bfcb33f07239ff2b091ff4eb8fd8534"}
Nov 22 09:06:35 crc kubenswrapper[4693]: I1122 09:06:35.295869 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4"
Nov 22 09:06:35 crc kubenswrapper[4693]: I1122 09:06:35.309407 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-f5cd6b797-xrvd4" podStartSLOduration=28.309393559 podStartE2EDuration="28.309393559s" podCreationTimestamp="2025-11-22 09:06:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:35.307768524 +0000 UTC m=+191.450270815" watchObservedRunningTime="2025-11-22 09:06:35.309393559 +0000 UTC m=+191.451895850"
Nov 22 09:06:42 crc kubenswrapper[4693]: I1122 09:06:42.626654 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"]
Nov 22 09:06:42 crc kubenswrapper[4693]: I1122 09:06:42.627400 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" podUID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" containerName="controller-manager" containerID="cri-o://dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902" gracePeriod=30
Nov 22 09:06:42 crc kubenswrapper[4693]: I1122 09:06:42.651450 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"]
Nov 22 09:06:42 crc kubenswrapper[4693]: I1122 09:06:42.651633 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" podUID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" containerName="route-controller-manager" containerID="cri-o://32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736" gracePeriod=30
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.043837 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.081203 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gkk94\" (UniqueName: \"kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94\") pod \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.081252 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca\") pod \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.081322 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert\") pod \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.081340 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config\") pod \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\" (UID: \"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.082077 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config" (OuterVolumeSpecName: "config") pod "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" (UID: "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.082273 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca" (OuterVolumeSpecName: "client-ca") pod "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" (UID: "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.085700 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94" (OuterVolumeSpecName: "kube-api-access-gkk94") pod "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" (UID: "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3"). InnerVolumeSpecName "kube-api-access-gkk94". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.088913 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" (UID: "d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.108982 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8"
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182224 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config\") pod \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182263 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert\") pod \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182282 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8wmbv\" (UniqueName: \"kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv\") pod \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182315 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles\") pod \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182337 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca\") pod \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\" (UID: \"a1dfd539-c463-47e6-83a3-31ee0a6e8202\") "
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182466 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-serving-cert\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182486 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-config\") on node \"crc\" DevicePath \"\""
Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182494 4693 reconciler_common.go:293] "Volume detached for volume
\"kube-api-access-gkk94\" (UniqueName: \"kubernetes.io/projected/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-kube-api-access-gkk94\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182502 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182868 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config" (OuterVolumeSpecName: "config") pod "a1dfd539-c463-47e6-83a3-31ee0a6e8202" (UID: "a1dfd539-c463-47e6-83a3-31ee0a6e8202"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.182891 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca" (OuterVolumeSpecName: "client-ca") pod "a1dfd539-c463-47e6-83a3-31ee0a6e8202" (UID: "a1dfd539-c463-47e6-83a3-31ee0a6e8202"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.183035 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "a1dfd539-c463-47e6-83a3-31ee0a6e8202" (UID: "a1dfd539-c463-47e6-83a3-31ee0a6e8202"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.185157 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv" (OuterVolumeSpecName: "kube-api-access-8wmbv") pod "a1dfd539-c463-47e6-83a3-31ee0a6e8202" (UID: "a1dfd539-c463-47e6-83a3-31ee0a6e8202"). InnerVolumeSpecName "kube-api-access-8wmbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.185230 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a1dfd539-c463-47e6-83a3-31ee0a6e8202" (UID: "a1dfd539-c463-47e6-83a3-31ee0a6e8202"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.283698 4693 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1dfd539-c463-47e6-83a3-31ee0a6e8202-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.283912 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8wmbv\" (UniqueName: \"kubernetes.io/projected/a1dfd539-c463-47e6-83a3-31ee0a6e8202-kube-api-access-8wmbv\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.283979 4693 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.284049 4693 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-client-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.284109 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1dfd539-c463-47e6-83a3-31ee0a6e8202-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.317476 4693 generic.go:334] "Generic (PLEG): container finished" podID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" containerID="dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902" exitCode=0 Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.317522 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.317518 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" event={"ID":"a1dfd539-c463-47e6-83a3-31ee0a6e8202","Type":"ContainerDied","Data":"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902"} Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.317567 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-64d758d556-mz4w8" event={"ID":"a1dfd539-c463-47e6-83a3-31ee0a6e8202","Type":"ContainerDied","Data":"875d1cd6c96c47f9a1eac39eb63faa707698c93aa585d06af03a637c39727ed0"} Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.317587 4693 scope.go:117] "RemoveContainer" containerID="dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.319169 4693 generic.go:334] "Generic (PLEG): container finished" podID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" containerID="32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736" exitCode=0 Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.319215 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.319230 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" event={"ID":"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3","Type":"ContainerDied","Data":"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736"} Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.319405 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc" event={"ID":"d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3","Type":"ContainerDied","Data":"03593ed1119a16a42f8e580d6f6306640ae506ea0abe1a20b229238d7d099964"} Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.328375 4693 scope.go:117] "RemoveContainer" containerID="dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902" Nov 22 09:06:43 crc kubenswrapper[4693]: E1122 09:06:43.328788 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902\": container with ID starting with dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902 not found: ID does not exist" containerID="dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.328815 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902"} err="failed to get container status \"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902\": rpc error: code = NotFound desc = could not find container \"dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902\": container with ID starting with dbfb70ed53e8a654a60b0233fc52cf2131490028973db50d330a5751916a6902 not found: ID does not exist" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.328831 4693 scope.go:117] "RemoveContainer" containerID="32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.337925 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.339496 4693 scope.go:117] "RemoveContainer" containerID="32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.339691 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-64d758d556-mz4w8"] Nov 22 09:06:43 crc kubenswrapper[4693]: E1122 09:06:43.339857 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736\": container with ID starting with 32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736 not found: ID does not exist" containerID="32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.339879 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736"} err="failed to get container status 
\"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736\": rpc error: code = NotFound desc = could not find container \"32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736\": container with ID starting with 32605711204c2b0eb547089719d555599a497e3e868ffd4317f80927713e8736 not found: ID does not exist" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.345547 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.348509 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7d5b594984-zbwcc"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728208 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-bb77698-8rtnb"] Nov 22 09:06:43 crc kubenswrapper[4693]: E1122 09:06:43.728420 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" containerName="route-controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728432 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" containerName="route-controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: E1122 09:06:43.728445 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" containerName="controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728452 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" containerName="controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728545 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" containerName="route-controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728555 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" containerName="controller-manager" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.728930 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.730765 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.730889 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.730964 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.731093 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.731132 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.731359 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.731940 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.732964 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.732971 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.733021 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.733101 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.733328 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.733566 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.736767 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.738151 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bb77698-8rtnb"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.738895 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.740032 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz"] Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.789886 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-client-ca\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.789935 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67f7587b-8a02-4a02-a875-9d0e94bc71b0-serving-cert\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.789958 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpdvd\" (UniqueName: \"kubernetes.io/projected/78306d2e-b25d-42cd-bd79-daf7fe160ad2-kube-api-access-hpdvd\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc 
kubenswrapper[4693]: I1122 09:06:43.789977 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-client-ca\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.790025 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-proxy-ca-bundles\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.790062 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-config\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.790091 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/78306d2e-b25d-42cd-bd79-daf7fe160ad2-serving-cert\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.790112 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc5ff\" (UniqueName: \"kubernetes.io/projected/67f7587b-8a02-4a02-a875-9d0e94bc71b0-kube-api-access-jc5ff\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.790178 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-config\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891416 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-config\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891464 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-client-ca\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891495 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67f7587b-8a02-4a02-a875-9d0e94bc71b0-serving-cert\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891516 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpdvd\" (UniqueName: \"kubernetes.io/projected/78306d2e-b25d-42cd-bd79-daf7fe160ad2-kube-api-access-hpdvd\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891534 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-client-ca\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891559 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-proxy-ca-bundles\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891578 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-config\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891597 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/78306d2e-b25d-42cd-bd79-daf7fe160ad2-serving-cert\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.891616 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc5ff\" (UniqueName: \"kubernetes.io/projected/67f7587b-8a02-4a02-a875-9d0e94bc71b0-kube-api-access-jc5ff\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.892377 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-client-ca\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.892493 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-proxy-ca-bundles\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.892562 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/78306d2e-b25d-42cd-bd79-daf7fe160ad2-config\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.892599 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-client-ca\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.892885 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/67f7587b-8a02-4a02-a875-9d0e94bc71b0-config\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.894752 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/78306d2e-b25d-42cd-bd79-daf7fe160ad2-serving-cert\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.897220 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/67f7587b-8a02-4a02-a875-9d0e94bc71b0-serving-cert\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.903891 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc5ff\" (UniqueName: \"kubernetes.io/projected/67f7587b-8a02-4a02-a875-9d0e94bc71b0-kube-api-access-jc5ff\") pod \"controller-manager-bb77698-8rtnb\" (UID: \"67f7587b-8a02-4a02-a875-9d0e94bc71b0\") " pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:43 crc kubenswrapper[4693]: I1122 09:06:43.904559 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpdvd\" (UniqueName: \"kubernetes.io/projected/78306d2e-b25d-42cd-bd79-daf7fe160ad2-kube-api-access-hpdvd\") pod \"route-controller-manager-5cb95679b6-8n4mz\" (UID: \"78306d2e-b25d-42cd-bd79-daf7fe160ad2\") " pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.040957 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.048135 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.154184 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1dfd539-c463-47e6-83a3-31ee0a6e8202" path="/var/lib/kubelet/pods/a1dfd539-c463-47e6-83a3-31ee0a6e8202/volumes" Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.154938 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3" path="/var/lib/kubelet/pods/d515f9c1-b7dc-41d8-ba9d-3bd6d1c8c6a3/volumes" Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.372562 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-bb77698-8rtnb"] Nov 22 09:06:44 crc kubenswrapper[4693]: W1122 09:06:44.377203 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67f7587b_8a02_4a02_a875_9d0e94bc71b0.slice/crio-21fd98b2daa06cbeb2534c9c6c7bc37a1540d1808760993b5a4ea1be85246251 WatchSource:0}: Error finding container 21fd98b2daa06cbeb2534c9c6c7bc37a1540d1808760993b5a4ea1be85246251: Status 404 returned error can't find the container with id 21fd98b2daa06cbeb2534c9c6c7bc37a1540d1808760993b5a4ea1be85246251 Nov 22 09:06:44 crc kubenswrapper[4693]: I1122 09:06:44.413993 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz"] Nov 22 09:06:44 crc kubenswrapper[4693]: W1122 09:06:44.427906 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78306d2e_b25d_42cd_bd79_daf7fe160ad2.slice/crio-4fa9de79e0d032dca04a5b22e456512f875131e6e21bac42e6f4f2135bc17c3a WatchSource:0}: Error finding container 4fa9de79e0d032dca04a5b22e456512f875131e6e21bac42e6f4f2135bc17c3a: Status 404 returned error can't find the container with id 4fa9de79e0d032dca04a5b22e456512f875131e6e21bac42e6f4f2135bc17c3a Nov 22 09:06:45 crc kubenswrapper[4693]: E1122 09:06:45.330447 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.339064 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" event={"ID":"78306d2e-b25d-42cd-bd79-daf7fe160ad2","Type":"ContainerStarted","Data":"a943e426100cd1f5f3270f938dbcb7a55d1b0b6d1ae35e760c77b49df7a1a859"} Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.339105 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" event={"ID":"78306d2e-b25d-42cd-bd79-daf7fe160ad2","Type":"ContainerStarted","Data":"4fa9de79e0d032dca04a5b22e456512f875131e6e21bac42e6f4f2135bc17c3a"} Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.341402 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.344674 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" event={"ID":"67f7587b-8a02-4a02-a875-9d0e94bc71b0","Type":"ContainerStarted","Data":"5467573d4be8632cca24f4e0de21f4a85ddddfbf2d29faa425c9859cfc2658ff"} Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.344713 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" event={"ID":"67f7587b-8a02-4a02-a875-9d0e94bc71b0","Type":"ContainerStarted","Data":"21fd98b2daa06cbeb2534c9c6c7bc37a1540d1808760993b5a4ea1be85246251"} Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.345615 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.348064 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.350035 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.354967 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5cb95679b6-8n4mz" podStartSLOduration=3.354957916 podStartE2EDuration="3.354957916s" podCreationTimestamp="2025-11-22 09:06:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:45.353646537 +0000 UTC m=+201.496148828" watchObservedRunningTime="2025-11-22 09:06:45.354957916 +0000 UTC m=+201.497460207" Nov 22 09:06:45 crc kubenswrapper[4693]: I1122 09:06:45.369593 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-bb77698-8rtnb" podStartSLOduration=3.369584976 podStartE2EDuration="3.369584976s" podCreationTimestamp="2025-11-22 09:06:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:06:45.368748721 +0000 UTC m=+201.511251012" watchObservedRunningTime="2025-11-22 09:06:45.369584976 +0000 UTC m=+201.512087267" Nov 22 09:06:55 crc kubenswrapper[4693]: E1122 09:06:55.428157 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]" Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.246558 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.246804 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:07:00 crc 
Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.246857 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r"
Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.247176 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.247221 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de" gracePeriod=600
Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.412742 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de" exitCode=0
Nov 22 09:07:00 crc kubenswrapper[4693]: I1122 09:07:00.412817 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de"}
Nov 22 09:07:01 crc kubenswrapper[4693]: I1122 09:07:01.419499 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd"}
Nov 22 09:07:05 crc kubenswrapper[4693]: E1122 09:07:05.521739 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]"
Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.012491 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"]
Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.013245 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-f6rdl" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="registry-server" containerID="cri-o://ef7f3d2efdf6c7739853c586ee572b93f1cd51bb87a76f921e267e2dd8a01199" gracePeriod=30
Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.024411 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hwwtg"]
Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.024627 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-hwwtg" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="registry-server" containerID="cri-o://f03b110816ac8d48273655d210ab8d84a12521a76e90bf968bb5f12c5c33d678" gracePeriod=30
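
[Annotation, not log output] The entries above are kubelet's graceful termination path: "Killing container with a grace period" (gracePeriod=600 for the failed-liveness restart, gracePeriod=30 for the API-initiated marketplace deletions), followed by a "ContainerDied" PLEG event once the process exits. The actual signalling is performed by the runtime (CRI-O here); a minimal sketch of the shape the grace period names, SIGTERM, wait up to the grace period, then SIGKILL, is below, demonstrated against a local process. The "sleep" command and the 30-second value are illustrative only.

// gracefulkill.go - sketch of the grace-period kill pattern; not kubelet or
// CRI-O code, just the SIGTERM -> bounded wait -> SIGKILL mechanism.
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM) // polite request to exit

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case <-done:
		fmt.Println("exited within grace period") // the exitCode=0 path seen in the log
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL once the grace period lapses
		<-done
		fmt.Println("force-killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "300")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 30*time.Second) // 30s mirrors the registry-server deletions above
}

Since sleep exits on SIGTERM, this takes the fast path; a process that ignores SIGTERM would be force-killed when the timer fires, which is what a pod's terminationGracePeriodSeconds bounds in the real system.
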
source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"] Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.026993 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" containerID="cri-o://dfc26093727b8f92d963a229244b4cefce8f57b35244fe087262c5ad611f4027" gracePeriod=30 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.032404 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.032570 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4rbw9" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="registry-server" containerID="cri-o://63d63e46965040e4e07700997d880009025844d8f89e6af39e76d223f6e8c6ee" gracePeriod=30 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.043164 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mrzgw"] Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.043735 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.047369 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.047573 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-4mxcr" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="registry-server" containerID="cri-o://1279d1d415832fd3b23f5faec78b2dc0eacabdc326137fa3dde6539ad014010f" gracePeriod=30 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.056646 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mrzgw"] Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.130913 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.130961 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.131006 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wng94\" (UniqueName: \"kubernetes.io/projected/651abef9-77a9-4b60-9522-af17781c7a4b-kube-api-access-wng94\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.232291 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.232634 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.232726 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wng94\" (UniqueName: \"kubernetes.io/projected/651abef9-77a9-4b60-9522-af17781c7a4b-kube-api-access-wng94\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.233979 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.239673 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/651abef9-77a9-4b60-9522-af17781c7a4b-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.247757 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wng94\" (UniqueName: \"kubernetes.io/projected/651abef9-77a9-4b60-9522-af17781c7a4b-kube-api-access-wng94\") pod \"marketplace-operator-79b997595-mrzgw\" (UID: \"651abef9-77a9-4b60-9522-af17781c7a4b\") " pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.365413 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.444703 4693 generic.go:334] "Generic (PLEG): container finished" podID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerID="dfc26093727b8f92d963a229244b4cefce8f57b35244fe087262c5ad611f4027" exitCode=0 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.444788 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" event={"ID":"6ba8530d-18bd-4021-8187-6c716bc87a32","Type":"ContainerDied","Data":"dfc26093727b8f92d963a229244b4cefce8f57b35244fe087262c5ad611f4027"} Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.446056 4693 generic.go:334] "Generic (PLEG): container finished" podID="79fc98da-aee2-436c-823b-5e608446dc29" containerID="f03b110816ac8d48273655d210ab8d84a12521a76e90bf968bb5f12c5c33d678" exitCode=0 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.446102 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerDied","Data":"f03b110816ac8d48273655d210ab8d84a12521a76e90bf968bb5f12c5c33d678"} Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.447825 4693 generic.go:334] "Generic (PLEG): container finished" podID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerID="ef7f3d2efdf6c7739853c586ee572b93f1cd51bb87a76f921e267e2dd8a01199" exitCode=0 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.447883 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerDied","Data":"ef7f3d2efdf6c7739853c586ee572b93f1cd51bb87a76f921e267e2dd8a01199"} Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.447900 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-f6rdl" event={"ID":"acb14103-18ad-40be-b7e1-2fe99282f86c","Type":"ContainerDied","Data":"b9a9d459cbfb8f3ca894d2d90a31cf3c57d554537ae2243c826f1ca439faec4e"} Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.447909 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b9a9d459cbfb8f3ca894d2d90a31cf3c57d554537ae2243c826f1ca439faec4e" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.449537 4693 generic.go:334] "Generic (PLEG): container finished" podID="7684c092-86da-49ca-97d4-dfcebb032adf" containerID="63d63e46965040e4e07700997d880009025844d8f89e6af39e76d223f6e8c6ee" exitCode=0 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.449573 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerDied","Data":"63d63e46965040e4e07700997d880009025844d8f89e6af39e76d223f6e8c6ee"} Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.450853 4693 generic.go:334] "Generic (PLEG): container finished" podID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerID="1279d1d415832fd3b23f5faec78b2dc0eacabdc326137fa3dde6539ad014010f" exitCode=0 Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.450878 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerDied","Data":"1279d1d415832fd3b23f5faec78b2dc0eacabdc326137fa3dde6539ad014010f"} Nov 22 09:07:07 crc 
kubenswrapper[4693]: I1122 09:07:07.547781 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.598816 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.603344 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.610226 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.611036 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635291 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities\") pod \"7684c092-86da-49ca-97d4-dfcebb032adf\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635358 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2zm5\" (UniqueName: \"kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5\") pod \"79fc98da-aee2-436c-823b-5e608446dc29\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635387 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content\") pod \"7684c092-86da-49ca-97d4-dfcebb032adf\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635436 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4b795\" (UniqueName: \"kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795\") pod \"7684c092-86da-49ca-97d4-dfcebb032adf\" (UID: \"7684c092-86da-49ca-97d4-dfcebb032adf\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635457 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content\") pod \"acb14103-18ad-40be-b7e1-2fe99282f86c\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635472 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hfgf9\" (UniqueName: \"kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9\") pod \"6ba8530d-18bd-4021-8187-6c716bc87a32\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635496 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r7cw\" (UniqueName: \"kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw\") pod \"acb14103-18ad-40be-b7e1-2fe99282f86c\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " Nov 22 09:07:07 crc kubenswrapper[4693]: 
I1122 09:07:07.635589 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ml9nq\" (UniqueName: \"kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq\") pod \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635608 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca\") pod \"6ba8530d-18bd-4021-8187-6c716bc87a32\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635641 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities\") pod \"79fc98da-aee2-436c-823b-5e608446dc29\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635660 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics\") pod \"6ba8530d-18bd-4021-8187-6c716bc87a32\" (UID: \"6ba8530d-18bd-4021-8187-6c716bc87a32\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635687 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities\") pod \"acb14103-18ad-40be-b7e1-2fe99282f86c\" (UID: \"acb14103-18ad-40be-b7e1-2fe99282f86c\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635718 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities\") pod \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635756 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content\") pod \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\" (UID: \"ff7eb3fe-9db6-48e9-87c8-707818512fb5\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.635788 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content\") pod \"79fc98da-aee2-436c-823b-5e608446dc29\" (UID: \"79fc98da-aee2-436c-823b-5e608446dc29\") " Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.636318 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities" (OuterVolumeSpecName: "utilities") pod "79fc98da-aee2-436c-823b-5e608446dc29" (UID: "79fc98da-aee2-436c-823b-5e608446dc29"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.636479 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6ba8530d-18bd-4021-8187-6c716bc87a32" (UID: "6ba8530d-18bd-4021-8187-6c716bc87a32"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.636522 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities" (OuterVolumeSpecName: "utilities") pod "acb14103-18ad-40be-b7e1-2fe99282f86c" (UID: "acb14103-18ad-40be-b7e1-2fe99282f86c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.637306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities" (OuterVolumeSpecName: "utilities") pod "ff7eb3fe-9db6-48e9-87c8-707818512fb5" (UID: "ff7eb3fe-9db6-48e9-87c8-707818512fb5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.637633 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities" (OuterVolumeSpecName: "utilities") pod "7684c092-86da-49ca-97d4-dfcebb032adf" (UID: "7684c092-86da-49ca-97d4-dfcebb032adf"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.640247 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw" (OuterVolumeSpecName: "kube-api-access-8r7cw") pod "acb14103-18ad-40be-b7e1-2fe99282f86c" (UID: "acb14103-18ad-40be-b7e1-2fe99282f86c"). InnerVolumeSpecName "kube-api-access-8r7cw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.640330 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9" (OuterVolumeSpecName: "kube-api-access-hfgf9") pod "6ba8530d-18bd-4021-8187-6c716bc87a32" (UID: "6ba8530d-18bd-4021-8187-6c716bc87a32"). InnerVolumeSpecName "kube-api-access-hfgf9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.641354 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6ba8530d-18bd-4021-8187-6c716bc87a32" (UID: "6ba8530d-18bd-4021-8187-6c716bc87a32"). InnerVolumeSpecName "marketplace-operator-metrics". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.641166 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5" (OuterVolumeSpecName: "kube-api-access-q2zm5") pod "79fc98da-aee2-436c-823b-5e608446dc29" (UID: "79fc98da-aee2-436c-823b-5e608446dc29"). InnerVolumeSpecName "kube-api-access-q2zm5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.643169 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq" (OuterVolumeSpecName: "kube-api-access-ml9nq") pod "ff7eb3fe-9db6-48e9-87c8-707818512fb5" (UID: "ff7eb3fe-9db6-48e9-87c8-707818512fb5"). InnerVolumeSpecName "kube-api-access-ml9nq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.662312 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795" (OuterVolumeSpecName: "kube-api-access-4b795") pod "7684c092-86da-49ca-97d4-dfcebb032adf" (UID: "7684c092-86da-49ca-97d4-dfcebb032adf"). InnerVolumeSpecName "kube-api-access-4b795". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.670497 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7684c092-86da-49ca-97d4-dfcebb032adf" (UID: "7684c092-86da-49ca-97d4-dfcebb032adf"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.698483 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "acb14103-18ad-40be-b7e1-2fe99282f86c" (UID: "acb14103-18ad-40be-b7e1-2fe99282f86c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.698549 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "79fc98da-aee2-436c-823b-5e608446dc29" (UID: "79fc98da-aee2-436c-823b-5e608446dc29"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.728323 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff7eb3fe-9db6-48e9-87c8-707818512fb5" (UID: "ff7eb3fe-9db6-48e9-87c8-707818512fb5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737078 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737107 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff7eb3fe-9db6-48e9-87c8-707818512fb5-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737119 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737127 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737160 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2zm5\" (UniqueName: \"kubernetes.io/projected/79fc98da-aee2-436c-823b-5e608446dc29-kube-api-access-q2zm5\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737171 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7684c092-86da-49ca-97d4-dfcebb032adf-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737180 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4b795\" (UniqueName: \"kubernetes.io/projected/7684c092-86da-49ca-97d4-dfcebb032adf-kube-api-access-4b795\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737191 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737199 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hfgf9\" (UniqueName: \"kubernetes.io/projected/6ba8530d-18bd-4021-8187-6c716bc87a32-kube-api-access-hfgf9\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737208 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r7cw\" (UniqueName: \"kubernetes.io/projected/acb14103-18ad-40be-b7e1-2fe99282f86c-kube-api-access-8r7cw\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737216 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ml9nq\" (UniqueName: \"kubernetes.io/projected/ff7eb3fe-9db6-48e9-87c8-707818512fb5-kube-api-access-ml9nq\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737226 4693 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737233 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/79fc98da-aee2-436c-823b-5e608446dc29-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737241 4693 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6ba8530d-18bd-4021-8187-6c716bc87a32-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.737250 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/acb14103-18ad-40be-b7e1-2fe99282f86c-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:07:07 crc kubenswrapper[4693]: I1122 09:07:07.834936 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-mrzgw"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.456012 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" event={"ID":"6ba8530d-18bd-4021-8187-6c716bc87a32","Type":"ContainerDied","Data":"c630a55feb1022a1449fe70adc50e9f15050aee2b951f03e34715b13f4c015c6"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.456067 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-7skdb" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.456256 4693 scope.go:117] "RemoveContainer" containerID="dfc26093727b8f92d963a229244b4cefce8f57b35244fe087262c5ad611f4027" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.457355 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" event={"ID":"651abef9-77a9-4b60-9522-af17781c7a4b","Type":"ContainerStarted","Data":"982fb1484424f45a6dba5ab3da449c8c5ea348adedb85e6ac4e24b2c565a1789"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.457388 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" event={"ID":"651abef9-77a9-4b60-9522-af17781c7a4b","Type":"ContainerStarted","Data":"8feeca77beac63671e7bb655adb89a7b56601cdb054cad25a375759e89777cd4"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.457431 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.459043 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-hwwtg" event={"ID":"79fc98da-aee2-436c-823b-5e608446dc29","Type":"ContainerDied","Data":"f5108ac55a6b26e0535e33bb0417ab364ab8da2737e6d25f14bbfa075e421d68"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.459156 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-hwwtg" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.460313 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.462535 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4rbw9" event={"ID":"7684c092-86da-49ca-97d4-dfcebb032adf","Type":"ContainerDied","Data":"ce3156c57d478c635f3e322e8ad2a3874fee6fb49fd6c264e865f33592843204"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.462620 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4rbw9" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.465300 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-f6rdl" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.465317 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-4mxcr" event={"ID":"ff7eb3fe-9db6-48e9-87c8-707818512fb5","Type":"ContainerDied","Data":"1d51c04a1acd8af9f5f37c9229876d32567c9506009408775fdaeffdbdf22e6a"} Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.465308 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-4mxcr" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.466373 4693 scope.go:117] "RemoveContainer" containerID="f03b110816ac8d48273655d210ab8d84a12521a76e90bf968bb5f12c5c33d678" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.476251 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-mrzgw" podStartSLOduration=1.476241468 podStartE2EDuration="1.476241468s" podCreationTimestamp="2025-11-22 09:07:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:07:08.475089769 +0000 UTC m=+224.617592060" watchObservedRunningTime="2025-11-22 09:07:08.476241468 +0000 UTC m=+224.618743759" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.477701 4693 scope.go:117] "RemoveContainer" containerID="dd2ca2c50994bb99cd3b09863e6d342d7d1baa89e5b2259214e17904ce088cb0" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.492031 4693 scope.go:117] "RemoveContainer" containerID="28fa0d7127de4219ebeb9e74c61e6a67b8417e2d3f588bcad115261a1ef70adf" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.497687 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.500566 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-7skdb"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.508724 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.508792 4693 scope.go:117] "RemoveContainer" containerID="63d63e46965040e4e07700997d880009025844d8f89e6af39e76d223f6e8c6ee" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.510323 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4rbw9"] Nov 22 09:07:08 crc 
kubenswrapper[4693]: I1122 09:07:08.515053 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-hwwtg"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.521027 4693 scope.go:117] "RemoveContainer" containerID="2448ed9afe7eb1f6c1123da1d8042dcb92fd5da4e1e4cd5ee1d6ae7e4159f4c0" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.524735 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-hwwtg"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.533116 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.538102 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-f6rdl"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.538803 4693 scope.go:117] "RemoveContainer" containerID="d4bc4a9f4de1093eaa785bd8be091f11b4fcdd7e59df768c02583d95875690e3" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.540690 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.542424 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-4mxcr"] Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.548248 4693 scope.go:117] "RemoveContainer" containerID="1279d1d415832fd3b23f5faec78b2dc0eacabdc326137fa3dde6539ad014010f" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.557641 4693 scope.go:117] "RemoveContainer" containerID="04620cdd1ccaf8754a801685f75a4747038c88c82d60376d2dab217b05a1a2e5" Nov 22 09:07:08 crc kubenswrapper[4693]: I1122 09:07:08.567809 4693 scope.go:117] "RemoveContainer" containerID="fbeddd9ede75cbc6c28291dde5870607eb4134b4cc762678a1aa96f184b9060c" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.226998 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-97l4g"] Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227369 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227386 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227396 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227402 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227410 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227416 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227424 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227429 4693 
state_mem.go:107] "Deleted CPUSet assignment" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227437 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227442 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227449 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227454 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227460 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227466 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227474 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227480 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227490 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227495 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227503 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227508 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="extract-utilities" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227516 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227521 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227528 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227533 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: E1122 09:07:09.227540 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="extract-content" Nov 22 09:07:09 crc 
kubenswrapper[4693]: I1122 09:07:09.227545 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="extract-content" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227617 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" containerName="marketplace-operator" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227626 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227633 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227640 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="79fc98da-aee2-436c-823b-5e608446dc29" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.227645 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" containerName="registry-server" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.228253 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.244068 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-97l4g"] Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.246089 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.252908 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpzdl\" (UniqueName: \"kubernetes.io/projected/98f8038a-33d0-416d-bbb8-54004a3fb6fe-kube-api-access-bpzdl\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.252992 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-catalog-content\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.253048 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-utilities\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.354259 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-catalog-content\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.354312 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-utilities\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.354348 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpzdl\" (UniqueName: \"kubernetes.io/projected/98f8038a-33d0-416d-bbb8-54004a3fb6fe-kube-api-access-bpzdl\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.354763 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-catalog-content\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.354771 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98f8038a-33d0-416d-bbb8-54004a3fb6fe-utilities\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.369094 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpzdl\" (UniqueName: \"kubernetes.io/projected/98f8038a-33d0-416d-bbb8-54004a3fb6fe-kube-api-access-bpzdl\") pod \"redhat-marketplace-97l4g\" (UID: \"98f8038a-33d0-416d-bbb8-54004a3fb6fe\") " pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.425143 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-9bbbm"] Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.426082 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.427871 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.432202 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9bbbm"] Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.455070 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-catalog-content\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.455103 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6z5gg\" (UniqueName: \"kubernetes.io/projected/75427b74-79ce-4837-95d0-a5b9b25ac98d-kube-api-access-6z5gg\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.455138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-utilities\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.556091 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.556195 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-catalog-content\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.556224 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6z5gg\" (UniqueName: \"kubernetes.io/projected/75427b74-79ce-4837-95d0-a5b9b25ac98d-kube-api-access-6z5gg\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.556264 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-utilities\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.557078 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-catalog-content\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.557325 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/75427b74-79ce-4837-95d0-a5b9b25ac98d-utilities\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.570162 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6z5gg\" (UniqueName: \"kubernetes.io/projected/75427b74-79ce-4837-95d0-a5b9b25ac98d-kube-api-access-6z5gg\") pod \"redhat-operators-9bbbm\" (UID: \"75427b74-79ce-4837-95d0-a5b9b25ac98d\") " pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.749930 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:09 crc kubenswrapper[4693]: I1122 09:07:09.885548 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-97l4g"] Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.085666 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-9bbbm"] Nov 22 09:07:10 crc kubenswrapper[4693]: W1122 09:07:10.111059 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod75427b74_79ce_4837_95d0_a5b9b25ac98d.slice/crio-b1281249a090986760db7b68330e5b5c4e6ed00f1b97192b8a02e78913ba6c30 WatchSource:0}: Error finding container b1281249a090986760db7b68330e5b5c4e6ed00f1b97192b8a02e78913ba6c30: Status 404 returned error can't find the container with id b1281249a090986760db7b68330e5b5c4e6ed00f1b97192b8a02e78913ba6c30 Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.150489 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ba8530d-18bd-4021-8187-6c716bc87a32" path="/var/lib/kubelet/pods/6ba8530d-18bd-4021-8187-6c716bc87a32/volumes" Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.151071 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7684c092-86da-49ca-97d4-dfcebb032adf" path="/var/lib/kubelet/pods/7684c092-86da-49ca-97d4-dfcebb032adf/volumes" Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.151591 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="79fc98da-aee2-436c-823b-5e608446dc29" path="/var/lib/kubelet/pods/79fc98da-aee2-436c-823b-5e608446dc29/volumes" Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.152144 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="acb14103-18ad-40be-b7e1-2fe99282f86c" path="/var/lib/kubelet/pods/acb14103-18ad-40be-b7e1-2fe99282f86c/volumes" Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.152662 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff7eb3fe-9db6-48e9-87c8-707818512fb5" path="/var/lib/kubelet/pods/ff7eb3fe-9db6-48e9-87c8-707818512fb5/volumes" Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.479745 4693 generic.go:334] "Generic (PLEG): container finished" podID="75427b74-79ce-4837-95d0-a5b9b25ac98d" containerID="65e5a3e62243fdb68cdb154eb6028727743391424be8e5ef2780ac28e9d1b63a" exitCode=0 Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.479815 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9bbbm" event={"ID":"75427b74-79ce-4837-95d0-a5b9b25ac98d","Type":"ContainerDied","Data":"65e5a3e62243fdb68cdb154eb6028727743391424be8e5ef2780ac28e9d1b63a"} Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.479870 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9bbbm" event={"ID":"75427b74-79ce-4837-95d0-a5b9b25ac98d","Type":"ContainerStarted","Data":"b1281249a090986760db7b68330e5b5c4e6ed00f1b97192b8a02e78913ba6c30"} Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.481228 4693 generic.go:334] "Generic (PLEG): container finished" podID="98f8038a-33d0-416d-bbb8-54004a3fb6fe" containerID="8fe6244970c1882999bc16de98ad52246cd80352d1b65e1a538c32e82a47351d" exitCode=0 Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.481286 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-97l4g" 
event={"ID":"98f8038a-33d0-416d-bbb8-54004a3fb6fe","Type":"ContainerDied","Data":"8fe6244970c1882999bc16de98ad52246cd80352d1b65e1a538c32e82a47351d"} Nov 22 09:07:10 crc kubenswrapper[4693]: I1122 09:07:10.481306 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-97l4g" event={"ID":"98f8038a-33d0-416d-bbb8-54004a3fb6fe","Type":"ContainerStarted","Data":"d1995b3daf89b98d2c4cb3c8ec69eeaed63cbe83e44329fed66eb1d7a710b98d"} Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.486089 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9bbbm" event={"ID":"75427b74-79ce-4837-95d0-a5b9b25ac98d","Type":"ContainerStarted","Data":"e89d68ee6dca40e740dd41bcd45a21ecf19a53c62ea73f908bdac39d31d352e4"} Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.488145 4693 generic.go:334] "Generic (PLEG): container finished" podID="98f8038a-33d0-416d-bbb8-54004a3fb6fe" containerID="43acc843e326b08b605df5697324aa096ab9b8b0dbd9dd23a72a3ba7a070f817" exitCode=0 Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.488172 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-97l4g" event={"ID":"98f8038a-33d0-416d-bbb8-54004a3fb6fe","Type":"ContainerDied","Data":"43acc843e326b08b605df5697324aa096ab9b8b0dbd9dd23a72a3ba7a070f817"} Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.625564 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-k6mmk"] Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.626584 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.627901 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.633704 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k6mmk"] Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.678996 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-catalog-content\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.679057 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-utilities\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.679132 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k2kb\" (UniqueName: \"kubernetes.io/projected/462930d8-4523-48fd-8fb0-fb2a23ff0445-kube-api-access-6k2kb\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.780550 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k2kb\" (UniqueName: 
\"kubernetes.io/projected/462930d8-4523-48fd-8fb0-fb2a23ff0445-kube-api-access-6k2kb\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.780623 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-catalog-content\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.780661 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-utilities\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.781167 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-utilities\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.781182 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/462930d8-4523-48fd-8fb0-fb2a23ff0445-catalog-content\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.794669 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k2kb\" (UniqueName: \"kubernetes.io/projected/462930d8-4523-48fd-8fb0-fb2a23ff0445-kube-api-access-6k2kb\") pod \"community-operators-k6mmk\" (UID: \"462930d8-4523-48fd-8fb0-fb2a23ff0445\") " pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.825390 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-fh447"] Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.826228 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.827930 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.832497 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fh447"] Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.882243 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-catalog-content\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.882281 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-utilities\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.882320 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9ntlx\" (UniqueName: \"kubernetes.io/projected/61a2c2b4-2853-480e-8515-b60a0e915a21-kube-api-access-9ntlx\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.941803 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.983572 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-utilities\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.983624 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9ntlx\" (UniqueName: \"kubernetes.io/projected/61a2c2b4-2853-480e-8515-b60a0e915a21-kube-api-access-9ntlx\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.983678 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-catalog-content\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.984180 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-utilities\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.984344 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/61a2c2b4-2853-480e-8515-b60a0e915a21-catalog-content\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:11 crc kubenswrapper[4693]: I1122 09:07:11.998689 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9ntlx\" (UniqueName: \"kubernetes.io/projected/61a2c2b4-2853-480e-8515-b60a0e915a21-kube-api-access-9ntlx\") pod \"certified-operators-fh447\" (UID: \"61a2c2b4-2853-480e-8515-b60a0e915a21\") " pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.143171 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.328131 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-k6mmk"] Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.479212 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-fh447"] Nov 22 09:07:12 crc kubenswrapper[4693]: W1122 09:07:12.485435 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod61a2c2b4_2853_480e_8515_b60a0e915a21.slice/crio-7770befb3130bb43273883dd83c48bdc79f95540cbd9689531c627d8d830ff18 WatchSource:0}: Error finding container 7770befb3130bb43273883dd83c48bdc79f95540cbd9689531c627d8d830ff18: Status 404 returned error can't find the container with id 7770befb3130bb43273883dd83c48bdc79f95540cbd9689531c627d8d830ff18 Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.493148 4693 generic.go:334] "Generic (PLEG): container finished" podID="462930d8-4523-48fd-8fb0-fb2a23ff0445" containerID="65be92d36a4e776334c2dd2530b244a60975283d227df82bb4e4ec91d122ba0c" exitCode=0 Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.493615 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k6mmk" event={"ID":"462930d8-4523-48fd-8fb0-fb2a23ff0445","Type":"ContainerDied","Data":"65be92d36a4e776334c2dd2530b244a60975283d227df82bb4e4ec91d122ba0c"} Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.493635 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k6mmk" event={"ID":"462930d8-4523-48fd-8fb0-fb2a23ff0445","Type":"ContainerStarted","Data":"9b9be64793c59b5366bdc1d4146f3f2ee1cb594f9118432b407b620cc053e15d"} Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.496142 4693 generic.go:334] "Generic (PLEG): container finished" podID="75427b74-79ce-4837-95d0-a5b9b25ac98d" containerID="e89d68ee6dca40e740dd41bcd45a21ecf19a53c62ea73f908bdac39d31d352e4" exitCode=0 Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.496263 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9bbbm" event={"ID":"75427b74-79ce-4837-95d0-a5b9b25ac98d","Type":"ContainerDied","Data":"e89d68ee6dca40e740dd41bcd45a21ecf19a53c62ea73f908bdac39d31d352e4"} Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.499428 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-97l4g" event={"ID":"98f8038a-33d0-416d-bbb8-54004a3fb6fe","Type":"ContainerStarted","Data":"ac6311628c5ab3900a718f1221ccdf0999cc8c6aabd13e4f5916b4d587f99094"} Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.500536 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fh447" event={"ID":"61a2c2b4-2853-480e-8515-b60a0e915a21","Type":"ContainerStarted","Data":"7770befb3130bb43273883dd83c48bdc79f95540cbd9689531c627d8d830ff18"} Nov 22 09:07:12 crc kubenswrapper[4693]: I1122 09:07:12.521771 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-97l4g" podStartSLOduration=2.012529308 podStartE2EDuration="3.521755851s" podCreationTimestamp="2025-11-22 09:07:09 +0000 UTC" firstStartedPulling="2025-11-22 09:07:10.482763597 +0000 UTC m=+226.625265888" lastFinishedPulling="2025-11-22 09:07:11.991990141 +0000 UTC m=+228.134492431" 
observedRunningTime="2025-11-22 09:07:12.517908877 +0000 UTC m=+228.660411168" watchObservedRunningTime="2025-11-22 09:07:12.521755851 +0000 UTC m=+228.664258141" Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.507262 4693 generic.go:334] "Generic (PLEG): container finished" podID="462930d8-4523-48fd-8fb0-fb2a23ff0445" containerID="df874019272082bf52a21295ec4ff13479937be33a391da7c5293e1b891154ff" exitCode=0 Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.507352 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k6mmk" event={"ID":"462930d8-4523-48fd-8fb0-fb2a23ff0445","Type":"ContainerDied","Data":"df874019272082bf52a21295ec4ff13479937be33a391da7c5293e1b891154ff"} Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.509948 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-9bbbm" event={"ID":"75427b74-79ce-4837-95d0-a5b9b25ac98d","Type":"ContainerStarted","Data":"e956e21a2317fb7926d02444d6720f58dc54995642fc05790315368ed8e2281b"} Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.511498 4693 generic.go:334] "Generic (PLEG): container finished" podID="61a2c2b4-2853-480e-8515-b60a0e915a21" containerID="dd8f4c9fab4166a88e729ef8cce07f5b0546283ec3e62c64b34e6adf943d2a7d" exitCode=0 Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.511582 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fh447" event={"ID":"61a2c2b4-2853-480e-8515-b60a0e915a21","Type":"ContainerDied","Data":"dd8f4c9fab4166a88e729ef8cce07f5b0546283ec3e62c64b34e6adf943d2a7d"} Nov 22 09:07:13 crc kubenswrapper[4693]: I1122 09:07:13.530577 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-9bbbm" podStartSLOduration=2.068347079 podStartE2EDuration="4.530564119s" podCreationTimestamp="2025-11-22 09:07:09 +0000 UTC" firstStartedPulling="2025-11-22 09:07:10.48718931 +0000 UTC m=+226.629691601" lastFinishedPulling="2025-11-22 09:07:12.94940635 +0000 UTC m=+229.091908641" observedRunningTime="2025-11-22 09:07:13.52971511 +0000 UTC m=+229.672217402" watchObservedRunningTime="2025-11-22 09:07:13.530564119 +0000 UTC m=+229.673066410" Nov 22 09:07:15 crc kubenswrapper[4693]: I1122 09:07:15.521484 4693 generic.go:334] "Generic (PLEG): container finished" podID="61a2c2b4-2853-480e-8515-b60a0e915a21" containerID="16bee21a3d909729901d1a1313429db3152dfdb4c6c3b1402a1e6de4f4928455" exitCode=0 Nov 22 09:07:15 crc kubenswrapper[4693]: I1122 09:07:15.521581 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fh447" event={"ID":"61a2c2b4-2853-480e-8515-b60a0e915a21","Type":"ContainerDied","Data":"16bee21a3d909729901d1a1313429db3152dfdb4c6c3b1402a1e6de4f4928455"} Nov 22 09:07:15 crc kubenswrapper[4693]: I1122 09:07:15.524207 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-k6mmk" event={"ID":"462930d8-4523-48fd-8fb0-fb2a23ff0445","Type":"ContainerStarted","Data":"6422784b86e14a250843bed07871208f53ec7738b1566c78e93ec047a750a2b0"} Nov 22 09:07:15 crc kubenswrapper[4693]: I1122 09:07:15.558966 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-k6mmk" podStartSLOduration=3.070034961 podStartE2EDuration="4.558953229s" podCreationTimestamp="2025-11-22 09:07:11 +0000 UTC" firstStartedPulling="2025-11-22 09:07:12.49443906 +0000 UTC m=+228.636941340" 
lastFinishedPulling="2025-11-22 09:07:13.983357317 +0000 UTC m=+230.125859608" observedRunningTime="2025-11-22 09:07:15.556793383 +0000 UTC m=+231.699295673" watchObservedRunningTime="2025-11-22 09:07:15.558953229 +0000 UTC m=+231.701455520" Nov 22 09:07:15 crc kubenswrapper[4693]: E1122 09:07:15.616528 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod47e432ee_9866_4bab_b0d6_21183cc4698f.slice/crio-c64cd8efc1060e07f866a9135ef3858c83ec8c7d0cfbedd7d09547463fd646d8\": RecentStats: unable to find data in memory cache]" Nov 22 09:07:16 crc kubenswrapper[4693]: I1122 09:07:16.532671 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-fh447" event={"ID":"61a2c2b4-2853-480e-8515-b60a0e915a21","Type":"ContainerStarted","Data":"a3d08ed28904c038cdee8daebb9695e4bc27e3b3eb09b2523693a94917d175b2"} Nov 22 09:07:16 crc kubenswrapper[4693]: I1122 09:07:16.545262 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-fh447" podStartSLOduration=3.054164491 podStartE2EDuration="5.54524767s" podCreationTimestamp="2025-11-22 09:07:11 +0000 UTC" firstStartedPulling="2025-11-22 09:07:13.512532129 +0000 UTC m=+229.655034420" lastFinishedPulling="2025-11-22 09:07:16.003615307 +0000 UTC m=+232.146117599" observedRunningTime="2025-11-22 09:07:16.544821638 +0000 UTC m=+232.687323929" watchObservedRunningTime="2025-11-22 09:07:16.54524767 +0000 UTC m=+232.687749961" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.556908 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.556946 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.585294 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.751086 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.751137 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:19 crc kubenswrapper[4693]: I1122 09:07:19.776883 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:20 crc kubenswrapper[4693]: I1122 09:07:20.573063 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-9bbbm" Nov 22 09:07:20 crc kubenswrapper[4693]: I1122 09:07:20.574936 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-97l4g" Nov 22 09:07:21 crc kubenswrapper[4693]: I1122 09:07:21.942565 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:21 crc kubenswrapper[4693]: I1122 09:07:21.942610 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:21 crc kubenswrapper[4693]: I1122 09:07:21.966818 4693 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:07:22 crc kubenswrapper[4693]: I1122 09:07:22.144226 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:22 crc kubenswrapper[4693]: I1122 09:07:22.144276 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:22 crc kubenswrapper[4693]: I1122 09:07:22.171088 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:22 crc kubenswrapper[4693]: I1122 09:07:22.579698 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-fh447" Nov 22 09:07:22 crc kubenswrapper[4693]: I1122 09:07:22.585111 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-k6mmk" Nov 22 09:09:00 crc kubenswrapper[4693]: I1122 09:09:00.246589 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:09:00 crc kubenswrapper[4693]: I1122 09:09:00.246958 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:09:30 crc kubenswrapper[4693]: I1122 09:09:30.246903 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:09:30 crc kubenswrapper[4693]: I1122 09:09:30.247194 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:10:00 crc kubenswrapper[4693]: I1122 09:10:00.246349 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:10:00 crc kubenswrapper[4693]: I1122 09:10:00.246664 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:10:00 crc kubenswrapper[4693]: I1122 09:10:00.246696 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:10:00 crc 
kubenswrapper[4693]: I1122 09:10:00.247109 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:10:00 crc kubenswrapper[4693]: I1122 09:10:00.247156 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd" gracePeriod=600 Nov 22 09:10:01 crc kubenswrapper[4693]: I1122 09:10:01.127129 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd" exitCode=0 Nov 22 09:10:01 crc kubenswrapper[4693]: I1122 09:10:01.127194 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd"} Nov 22 09:10:01 crc kubenswrapper[4693]: I1122 09:10:01.127345 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96"} Nov 22 09:10:01 crc kubenswrapper[4693]: I1122 09:10:01.127374 4693 scope.go:117] "RemoveContainer" containerID="89f9b7264c56ea9cd9bcea583e923769622bc8e6f44c10d09f3e281c81dad9de" Nov 22 09:10:56 crc kubenswrapper[4693]: I1122 09:10:56.988947 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dwrrv"] Nov 22 09:10:56 crc kubenswrapper[4693]: I1122 09:10:56.989994 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:56 crc kubenswrapper[4693]: I1122 09:10:56.998034 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dwrrv"] Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164284 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-certificates\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164370 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-tls\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164399 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-klqsh\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-kube-api-access-klqsh\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164472 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-trusted-ca\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164503 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-bound-sa-token\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164629 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164767 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/732fdf6e-1b03-4a5d-9566-881fe6d37500-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.164862 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/732fdf6e-1b03-4a5d-9566-881fe6d37500-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.184641 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266255 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/732fdf6e-1b03-4a5d-9566-881fe6d37500-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266301 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/732fdf6e-1b03-4a5d-9566-881fe6d37500-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266328 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-certificates\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266363 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-tls\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266386 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-klqsh\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-kube-api-access-klqsh\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266422 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-trusted-ca\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266441 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-bound-sa-token\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.266887 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/732fdf6e-1b03-4a5d-9566-881fe6d37500-ca-trust-extracted\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.267727 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-trusted-ca\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.267864 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-certificates\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.270990 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/732fdf6e-1b03-4a5d-9566-881fe6d37500-installation-pull-secrets\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.271042 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-registry-tls\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.287895 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-bound-sa-token\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.288013 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-klqsh\" (UniqueName: \"kubernetes.io/projected/732fdf6e-1b03-4a5d-9566-881fe6d37500-kube-api-access-klqsh\") pod \"image-registry-66df7c8f76-dwrrv\" (UID: \"732fdf6e-1b03-4a5d-9566-881fe6d37500\") " pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.301796 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:57 crc kubenswrapper[4693]: I1122 09:10:57.643535 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-dwrrv"] Nov 22 09:10:58 crc kubenswrapper[4693]: I1122 09:10:58.335578 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" event={"ID":"732fdf6e-1b03-4a5d-9566-881fe6d37500","Type":"ContainerStarted","Data":"b94843df0d499a795be1f421b778d7a5dbf5409dcf98fe51f922a73ddca16ee0"} Nov 22 09:10:58 crc kubenswrapper[4693]: I1122 09:10:58.335870 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:10:58 crc kubenswrapper[4693]: I1122 09:10:58.335883 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" event={"ID":"732fdf6e-1b03-4a5d-9566-881fe6d37500","Type":"ContainerStarted","Data":"c3857d74eaa415548e071e43455b8beec1f9875e8e6e4284096464c1c929d325"} Nov 22 09:10:58 crc kubenswrapper[4693]: I1122 09:10:58.349857 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" podStartSLOduration=2.349826737 podStartE2EDuration="2.349826737s" podCreationTimestamp="2025-11-22 09:10:56 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:10:58.348206335 +0000 UTC m=+454.490708626" watchObservedRunningTime="2025-11-22 09:10:58.349826737 +0000 UTC m=+454.492329027" Nov 22 09:11:17 crc kubenswrapper[4693]: I1122 09:11:17.305211 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-dwrrv" Nov 22 09:11:17 crc kubenswrapper[4693]: I1122 09:11:17.334599 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.395456 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pb9xc"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.396532 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.397263 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tr5jb"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.397688 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tr5jb" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.399701 4693 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-f57mg" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.399926 4693 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-tjhv6" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.400059 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.401904 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.408286 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w2r5d"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.408759 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.410286 4693 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-rjx4m" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.410945 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pb9xc"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.415630 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tr5jb"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.417579 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w2r5d"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.533427 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gbzwj\" (UniqueName: \"kubernetes.io/projected/f5108d4d-c652-4cba-8492-281beb13ce46-kube-api-access-gbzwj\") pod \"cert-manager-cainjector-7f985d654d-pb9xc\" (UID: \"f5108d4d-c652-4cba-8492-281beb13ce46\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.533640 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vv8rm\" (UniqueName: \"kubernetes.io/projected/58fe0427-fbdb-40ea-9b6e-80f09e215015-kube-api-access-vv8rm\") pod \"cert-manager-5b446d88c5-tr5jb\" (UID: \"58fe0427-fbdb-40ea-9b6e-80f09e215015\") " pod="cert-manager/cert-manager-5b446d88c5-tr5jb" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.533674 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m2j2d\" (UniqueName: \"kubernetes.io/projected/3827b7dd-1354-484e-8430-daec9b09d589-kube-api-access-m2j2d\") pod \"cert-manager-webhook-5655c58dd6-w2r5d\" (UID: \"3827b7dd-1354-484e-8430-daec9b09d589\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.634417 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vv8rm\" (UniqueName: \"kubernetes.io/projected/58fe0427-fbdb-40ea-9b6e-80f09e215015-kube-api-access-vv8rm\") pod \"cert-manager-5b446d88c5-tr5jb\" (UID: \"58fe0427-fbdb-40ea-9b6e-80f09e215015\") " 
pod="cert-manager/cert-manager-5b446d88c5-tr5jb" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.634452 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m2j2d\" (UniqueName: \"kubernetes.io/projected/3827b7dd-1354-484e-8430-daec9b09d589-kube-api-access-m2j2d\") pod \"cert-manager-webhook-5655c58dd6-w2r5d\" (UID: \"3827b7dd-1354-484e-8430-daec9b09d589\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.634495 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gbzwj\" (UniqueName: \"kubernetes.io/projected/f5108d4d-c652-4cba-8492-281beb13ce46-kube-api-access-gbzwj\") pod \"cert-manager-cainjector-7f985d654d-pb9xc\" (UID: \"f5108d4d-c652-4cba-8492-281beb13ce46\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.650957 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vv8rm\" (UniqueName: \"kubernetes.io/projected/58fe0427-fbdb-40ea-9b6e-80f09e215015-kube-api-access-vv8rm\") pod \"cert-manager-5b446d88c5-tr5jb\" (UID: \"58fe0427-fbdb-40ea-9b6e-80f09e215015\") " pod="cert-manager/cert-manager-5b446d88c5-tr5jb" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.651015 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gbzwj\" (UniqueName: \"kubernetes.io/projected/f5108d4d-c652-4cba-8492-281beb13ce46-kube-api-access-gbzwj\") pod \"cert-manager-cainjector-7f985d654d-pb9xc\" (UID: \"f5108d4d-c652-4cba-8492-281beb13ce46\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.651481 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m2j2d\" (UniqueName: \"kubernetes.io/projected/3827b7dd-1354-484e-8430-daec9b09d589-kube-api-access-m2j2d\") pod \"cert-manager-webhook-5655c58dd6-w2r5d\" (UID: \"3827b7dd-1354-484e-8430-daec9b09d589\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.711601 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.717407 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-tr5jb" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.723831 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.881780 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-tr5jb"] Nov 22 09:11:41 crc kubenswrapper[4693]: I1122 09:11:41.888298 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.114620 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-w2r5d"] Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.116535 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-pb9xc"] Nov 22 09:11:42 crc kubenswrapper[4693]: W1122 09:11:42.119803 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3827b7dd_1354_484e_8430_daec9b09d589.slice/crio-4be83d4a7da77d90010257b2abf685cf939105d3a91b3eb4ecf3ca0e7f781156 WatchSource:0}: Error finding container 4be83d4a7da77d90010257b2abf685cf939105d3a91b3eb4ecf3ca0e7f781156: Status 404 returned error can't find the container with id 4be83d4a7da77d90010257b2abf685cf939105d3a91b3eb4ecf3ca0e7f781156 Nov 22 09:11:42 crc kubenswrapper[4693]: W1122 09:11:42.120323 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf5108d4d_c652_4cba_8492_281beb13ce46.slice/crio-5439fbe6b5535f5c8d7ef32d10fbe6a2b276f003c7d4b9b54d5a701945c0cb14 WatchSource:0}: Error finding container 5439fbe6b5535f5c8d7ef32d10fbe6a2b276f003c7d4b9b54d5a701945c0cb14: Status 404 returned error can't find the container with id 5439fbe6b5535f5c8d7ef32d10fbe6a2b276f003c7d4b9b54d5a701945c0cb14 Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.359410 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" podUID="cf82f0b1-de6b-449a-be90-f76c217b315b" containerName="registry" containerID="cri-o://4491ba63e95dbfc53749d56d31d08e28cc3ad6a3da767a752b305ae2a166a411" gracePeriod=30 Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.519004 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" event={"ID":"f5108d4d-c652-4cba-8492-281beb13ce46","Type":"ContainerStarted","Data":"5439fbe6b5535f5c8d7ef32d10fbe6a2b276f003c7d4b9b54d5a701945c0cb14"} Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.522500 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" event={"ID":"3827b7dd-1354-484e-8430-daec9b09d589","Type":"ContainerStarted","Data":"4be83d4a7da77d90010257b2abf685cf939105d3a91b3eb4ecf3ca0e7f781156"} Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.524108 4693 generic.go:334] "Generic (PLEG): container finished" podID="cf82f0b1-de6b-449a-be90-f76c217b315b" containerID="4491ba63e95dbfc53749d56d31d08e28cc3ad6a3da767a752b305ae2a166a411" exitCode=0 Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.524163 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" event={"ID":"cf82f0b1-de6b-449a-be90-f76c217b315b","Type":"ContainerDied","Data":"4491ba63e95dbfc53749d56d31d08e28cc3ad6a3da767a752b305ae2a166a411"} Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.525518 4693 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tr5jb" event={"ID":"58fe0427-fbdb-40ea-9b6e-80f09e215015","Type":"ContainerStarted","Data":"f62793760d3a6169f83503f52d33c0717feb3b8b2538e8deab4738f6069723f6"} Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.621244 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.747577 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.747862 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.747893 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.748004 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.748040 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.748072 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.748094 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.748124 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9kfj\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj\") pod \"cf82f0b1-de6b-449a-be90-f76c217b315b\" (UID: \"cf82f0b1-de6b-449a-be90-f76c217b315b\") " Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.750089 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca" 
(OuterVolumeSpecName: "trusted-ca") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.750130 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.754522 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.758333 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.758629 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.759101 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj" (OuterVolumeSpecName: "kube-api-access-z9kfj") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "kube-api-access-z9kfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.763720 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.766305 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "cf82f0b1-de6b-449a-be90-f76c217b315b" (UID: "cf82f0b1-de6b-449a-be90-f76c217b315b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.849928 4693 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/cf82f0b1-de6b-449a-be90-f76c217b315b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.849965 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.849979 4693 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/cf82f0b1-de6b-449a-be90-f76c217b315b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.849988 4693 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.849997 4693 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/cf82f0b1-de6b-449a-be90-f76c217b315b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.850007 4693 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:42 crc kubenswrapper[4693]: I1122 09:11:42.850016 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9kfj\" (UniqueName: \"kubernetes.io/projected/cf82f0b1-de6b-449a-be90-f76c217b315b-kube-api-access-z9kfj\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:43 crc kubenswrapper[4693]: I1122 09:11:43.532953 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" event={"ID":"cf82f0b1-de6b-449a-be90-f76c217b315b","Type":"ContainerDied","Data":"7d105b8933b19a155be04998a638dfb91612ee46b767b4dc9d7f95da1008a785"} Nov 22 09:11:43 crc kubenswrapper[4693]: I1122 09:11:43.533276 4693 scope.go:117] "RemoveContainer" containerID="4491ba63e95dbfc53749d56d31d08e28cc3ad6a3da767a752b305ae2a166a411" Nov 22 09:11:43 crc kubenswrapper[4693]: I1122 09:11:43.533176 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-s8j86" Nov 22 09:11:43 crc kubenswrapper[4693]: I1122 09:11:43.563706 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"] Nov 22 09:11:43 crc kubenswrapper[4693]: I1122 09:11:43.565983 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-s8j86"] Nov 22 09:11:44 crc kubenswrapper[4693]: I1122 09:11:44.151876 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf82f0b1-de6b-449a-be90-f76c217b315b" path="/var/lib/kubelet/pods/cf82f0b1-de6b-449a-be90-f76c217b315b/volumes" Nov 22 09:11:44 crc kubenswrapper[4693]: I1122 09:11:44.540253 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-tr5jb" event={"ID":"58fe0427-fbdb-40ea-9b6e-80f09e215015","Type":"ContainerStarted","Data":"88ebf85c92065a5ff8880c66c06d8c4b13159fca39b55cec31ad2b9ea4e696dc"} Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.545707 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" event={"ID":"3827b7dd-1354-484e-8430-daec9b09d589","Type":"ContainerStarted","Data":"6f4813ba7a8f316924a4f15077f80028dc49fb605ae04e39ab691db8bbf0505e"} Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.545774 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.548014 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" event={"ID":"f5108d4d-c652-4cba-8492-281beb13ce46","Type":"ContainerStarted","Data":"db796069934689b6ef5b84ac8ecdc3a830f61d592e9c398d6fe0a973b27846b5"} Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.559306 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-tr5jb" podStartSLOduration=2.699324988 podStartE2EDuration="4.559293815s" podCreationTimestamp="2025-11-22 09:11:41 +0000 UTC" firstStartedPulling="2025-11-22 09:11:41.888098409 +0000 UTC m=+498.030600690" lastFinishedPulling="2025-11-22 09:11:43.748067226 +0000 UTC m=+499.890569517" observedRunningTime="2025-11-22 09:11:44.554531887 +0000 UTC m=+500.697034178" watchObservedRunningTime="2025-11-22 09:11:45.559293815 +0000 UTC m=+501.701796106" Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.559960 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" podStartSLOduration=1.981410358 podStartE2EDuration="4.559955788s" podCreationTimestamp="2025-11-22 09:11:41 +0000 UTC" firstStartedPulling="2025-11-22 09:11:42.121905471 +0000 UTC m=+498.264407752" lastFinishedPulling="2025-11-22 09:11:44.700450891 +0000 UTC m=+500.842953182" observedRunningTime="2025-11-22 09:11:45.557077965 +0000 UTC m=+501.699580247" watchObservedRunningTime="2025-11-22 09:11:45.559955788 +0000 UTC m=+501.702458069" Nov 22 09:11:45 crc kubenswrapper[4693]: I1122 09:11:45.569402 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-pb9xc" podStartSLOduration=1.992684955 podStartE2EDuration="4.569393526s" podCreationTimestamp="2025-11-22 09:11:41 +0000 UTC" firstStartedPulling="2025-11-22 09:11:42.121904328 +0000 UTC m=+498.264406620" lastFinishedPulling="2025-11-22 
09:11:44.6986129 +0000 UTC m=+500.841115191" observedRunningTime="2025-11-22 09:11:45.568242385 +0000 UTC m=+501.710744676" watchObservedRunningTime="2025-11-22 09:11:45.569393526 +0000 UTC m=+501.711895817" Nov 22 09:11:51 crc kubenswrapper[4693]: I1122 09:11:51.726803 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-w2r5d" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.070793 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-852ps"] Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071781 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-controller" containerID="cri-o://c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071920 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="northd" containerID="cri-o://f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071985 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-node" containerID="cri-o://7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071985 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-acl-logging" containerID="cri-o://44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071866 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="nbdb" containerID="cri-o://c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071906 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.071894 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="sbdb" containerID="cri-o://3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.098415 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" containerID="cri-o://4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747" gracePeriod=30 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 
09:11:53.328638 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/3.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.331020 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovn-acl-logging/0.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.331545 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovn-controller/0.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.332071 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.385809 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-vqs7m"] Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386033 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386051 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386060 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="nbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386067 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="nbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386076 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cf82f0b1-de6b-449a-be90-f76c217b315b" containerName="registry" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386082 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="cf82f0b1-de6b-449a-be90-f76c217b315b" containerName="registry" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386092 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386097 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386106 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kubecfg-setup" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386110 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kubecfg-setup" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386117 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386121 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386131 4693 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-node" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386139 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-node" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386149 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-acl-logging" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386155 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-acl-logging" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386162 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="sbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386167 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="sbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386175 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386180 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386188 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386194 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386201 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="northd" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386206 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="northd" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386280 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="sbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386290 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386296 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386302 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386309 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovn-acl-logging" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386314 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-ovn-metrics" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386321 4693 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386330 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="nbdb" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386337 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="cf82f0b1-de6b-449a-be90-f76c217b315b" containerName="registry" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386343 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386349 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="kube-rbac-proxy-node" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386355 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="northd" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386428 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386435 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386519 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.386617 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.386625 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerName="ovnkube-controller" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.387825 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394376 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394499 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394447 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "systemd-units". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394575 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log" (OuterVolumeSpecName: "node-log") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394525 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394612 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394632 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394669 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394711 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394679 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394695 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394775 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394797 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394820 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394839 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394813 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394838 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash" (OuterVolumeSpecName: "host-slash") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394870 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394899 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket" (OuterVolumeSpecName: "log-socket") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394912 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394880 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.394951 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395069 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-env-overrides\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395094 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-systemd-units\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395118 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395124 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-log-socket\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395165 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-bin\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395192 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-config\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395219 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-node-log\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395242 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-netns\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395371 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395392 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-var-lib-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395413 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395428 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: 
\"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-systemd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395426 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395448 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395624 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-slash\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395667 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovn-node-metrics-cert\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395691 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-ovn\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395730 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-etc-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395754 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-netd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395830 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-script-lib\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: 
I1122 09:11:53.395865 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-kubelet\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395884 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5krqg\" (UniqueName: \"kubernetes.io/projected/ac92124f-4062-40d7-9d85-98436f3ea8a6-kube-api-access-5krqg\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395931 4693 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395942 4693 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-node-log\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395952 4693 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395959 4693 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395967 4693 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395975 4693 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395985 4693 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.395992 4693 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-slash\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.396000 4693 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.396008 4693 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.396016 4693 reconciler_common.go:293] "Volume detached for volume 
\"log-socket\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-log-socket\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.396022 4693 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496302 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496354 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496374 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496398 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496412 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496438 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2ndl\" (UniqueName: \"kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496492 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496510 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496538 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes\") pod \"2fa68d41-61c5-4781-8984-add9804c1b4b\" (UID: \"2fa68d41-61c5-4781-8984-add9804c1b4b\") " Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496601 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-systemd-units\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496590 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496654 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496567 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "host-cni-bin". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496672 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-systemd-units\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496628 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-log-socket\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496755 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-log-socket\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496811 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-bin\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496839 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-config\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496881 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "ovnkube-script-lib". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496891 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-node-log\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496916 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-node-log\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496934 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-bin\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497012 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-netns\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.496954 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-netns\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497078 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497123 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-var-lib-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497191 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497214 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-systemd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497245 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497317 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-slash\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497363 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovn-node-metrics-cert\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497390 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-ovn\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497442 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-etc-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497453 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497495 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-netd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497513 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-var-lib-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497547 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-systemd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497471 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-cni-netd\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497590 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-run-ovn\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497557 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-slash\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497622 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-run-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497595 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497593 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-etc-openvswitch\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497709 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-script-lib\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497729 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-kubelet\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497750 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5krqg\" (UniqueName: \"kubernetes.io/projected/ac92124f-4062-40d7-9d85-98436f3ea8a6-kube-api-access-5krqg\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497771 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-env-overrides\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497820 4693 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497832 4693 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/2fa68d41-61c5-4781-8984-add9804c1b4b-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497863 4693 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497872 4693 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.497882 4693 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.498223 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ac92124f-4062-40d7-9d85-98436f3ea8a6-host-kubelet\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.498581 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-config\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.498604 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovnkube-script-lib\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.498868 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ac92124f-4062-40d7-9d85-98436f3ea8a6-env-overrides\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.503496 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl" (OuterVolumeSpecName: "kube-api-access-p2ndl") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "kube-api-access-p2ndl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.504059 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.504604 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ac92124f-4062-40d7-9d85-98436f3ea8a6-ovn-node-metrics-cert\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.509781 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "2fa68d41-61c5-4781-8984-add9804c1b4b" (UID: "2fa68d41-61c5-4781-8984-add9804c1b4b"). InnerVolumeSpecName "run-systemd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.512754 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5krqg\" (UniqueName: \"kubernetes.io/projected/ac92124f-4062-40d7-9d85-98436f3ea8a6-kube-api-access-5krqg\") pod \"ovnkube-node-vqs7m\" (UID: \"ac92124f-4062-40d7-9d85-98436f3ea8a6\") " pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.591926 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovnkube-controller/3.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.594130 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovn-acl-logging/0.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.594619 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-852ps_2fa68d41-61c5-4781-8984-add9804c1b4b/ovn-controller/0.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595151 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595173 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595182 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595189 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595197 4693 generic.go:334] "Generic (PLEG): container 
finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595193 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595204 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926" exitCode=0 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595235 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0" exitCode=143 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595241 4693 generic.go:334] "Generic (PLEG): container finished" podID="2fa68d41-61c5-4781-8984-add9804c1b4b" containerID="c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07" exitCode=143 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595245 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595261 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595272 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595283 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595293 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595286 4693 scope.go:117] "RemoveContainer" containerID="4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595304 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595429 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 
09:11:53.595441 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595446 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595452 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595456 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595461 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595466 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595471 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595486 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595503 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595511 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595516 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595520 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595525 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595538 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 
09:11:53.595543 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595547 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595552 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595225 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595556 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595644 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595655 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595662 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595667 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595672 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595677 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595684 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595689 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595694 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595699 4693 pod_container_deletor.go:114] 
"Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595703 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595710 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-852ps" event={"ID":"2fa68d41-61c5-4781-8984-add9804c1b4b","Type":"ContainerDied","Data":"81af516e48f6a2f81f5a5ace52de774b1cde8c1d50c7263d9fe40ea0beb93812"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595717 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595722 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595727 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595732 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595737 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595742 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595747 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595751 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595756 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.595762 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.598729 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/2.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599046 4693 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-p2ndl\" (UniqueName: \"kubernetes.io/projected/2fa68d41-61c5-4781-8984-add9804c1b4b-kube-api-access-p2ndl\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599070 4693 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/2fa68d41-61c5-4781-8984-add9804c1b4b-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599081 4693 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/2fa68d41-61c5-4781-8984-add9804c1b4b-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599131 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/1.log" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599166 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7" containerID="1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e" exitCode=2 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599199 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerDied","Data":"1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599227 4693 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f"} Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.599640 4693 scope.go:117] "RemoveContainer" containerID="1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.599822 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-2s9rh_openshift-multus(9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7)\"" pod="openshift-multus/multus-2s9rh" podUID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.613380 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.624992 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-852ps"] Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.629084 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-852ps"] Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.648750 4693 scope.go:117] "RemoveContainer" containerID="3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.664769 4693 scope.go:117] "RemoveContainer" containerID="c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.675602 4693 scope.go:117] "RemoveContainer" containerID="f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.686424 4693 scope.go:117] "RemoveContainer" containerID="ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef" Nov 22 
09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.695814 4693 scope.go:117] "RemoveContainer" containerID="7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.700891 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.704645 4693 scope.go:117] "RemoveContainer" containerID="44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.715599 4693 scope.go:117] "RemoveContainer" containerID="c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07" Nov 22 09:11:53 crc kubenswrapper[4693]: W1122 09:11:53.724214 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac92124f_4062_40d7_9d85_98436f3ea8a6.slice/crio-43f44dcc88114a36d99d47301269dff68ce5f47123933055d0f0ca763f00c4b6 WatchSource:0}: Error finding container 43f44dcc88114a36d99d47301269dff68ce5f47123933055d0f0ca763f00c4b6: Status 404 returned error can't find the container with id 43f44dcc88114a36d99d47301269dff68ce5f47123933055d0f0ca763f00c4b6 Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.728326 4693 scope.go:117] "RemoveContainer" containerID="18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.749605 4693 scope.go:117] "RemoveContainer" containerID="4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.750080 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747\": container with ID starting with 4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747 not found: ID does not exist" containerID="4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750124 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747"} err="failed to get container status \"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747\": rpc error: code = NotFound desc = could not find container \"4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747\": container with ID starting with 4689c6f6c172259c1ae916adecf0a3270f4bd4a3acca634c8c84f88e087f9747 not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750149 4693 scope.go:117] "RemoveContainer" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.750433 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\": container with ID starting with bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242 not found: ID does not exist" containerID="bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750460 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242"} 
err="failed to get container status \"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\": rpc error: code = NotFound desc = could not find container \"bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242\": container with ID starting with bc83ef9297bf520b725828cffb957bd7b79014ca1ef84fbaf6af5e9bb92e4242 not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750473 4693 scope.go:117] "RemoveContainer" containerID="3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.750947 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\": container with ID starting with 3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076 not found: ID does not exist" containerID="3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750971 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076"} err="failed to get container status \"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\": rpc error: code = NotFound desc = could not find container \"3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076\": container with ID starting with 3293326fcdfb7ff3593712077c55e0070bc0f57583e6c55ef1a8a6f350a9a076 not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.750985 4693 scope.go:117] "RemoveContainer" containerID="c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.751657 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\": container with ID starting with c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651 not found: ID does not exist" containerID="c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.751715 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651"} err="failed to get container status \"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\": rpc error: code = NotFound desc = could not find container \"c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651\": container with ID starting with c16fa73c673e0003d8ece8259e22783bb2f3c4c2ccdca7963216e73d57126651 not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.751758 4693 scope.go:117] "RemoveContainer" containerID="f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.752435 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\": container with ID starting with f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa not found: ID does not exist" containerID="f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.752470 4693 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa"} err="failed to get container status \"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\": rpc error: code = NotFound desc = could not find container \"f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa\": container with ID starting with f25147ad5b2f8a6f3f36092ea185f9e6574a66169ae0b9570ce16e1ee2e98aaa not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.752490 4693 scope.go:117] "RemoveContainer" containerID="ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.752868 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\": container with ID starting with ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef not found: ID does not exist" containerID="ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.752894 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef"} err="failed to get container status \"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\": rpc error: code = NotFound desc = could not find container \"ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef\": container with ID starting with ad467514d1a2cd1dc898df6a5a2e9cb852c1718c9aadf716a72ec84e07a896ef not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.752909 4693 scope.go:117] "RemoveContainer" containerID="7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.753242 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\": container with ID starting with 7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926 not found: ID does not exist" containerID="7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.753342 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926"} err="failed to get container status \"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\": rpc error: code = NotFound desc = could not find container \"7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926\": container with ID starting with 7b1421130d0b8b5f421e752f1295ac256807169cf711604539fbbe486e028926 not found: ID does not exist" Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.753486 4693 scope.go:117] "RemoveContainer" containerID="44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0" Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.753984 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\": container with ID starting with 44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0 not found: ID does 
not exist" containerID="44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"
Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.754035 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0"} err="failed to get container status \"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\": rpc error: code = NotFound desc = could not find container \"44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0\": container with ID starting with 44f6aeb34ac92e4c6bb63055789ee070a33cf1a2fb430fdc389b493ea70063e0 not found: ID does not exist"
Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.754068 4693 scope.go:117] "RemoveContainer" containerID="c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"
Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.754450 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\": container with ID starting with c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07 not found: ID does not exist" containerID="c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"
Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.754472 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07"} err="failed to get container status \"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\": rpc error: code = NotFound desc = could not find container \"c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07\": container with ID starting with c558e5a06a6aa1276562b62772bc86a646e8bf13398a7db9e20e4eb4560b4e07 not found: ID does not exist"
Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.754486 4693 scope.go:117] "RemoveContainer" containerID="18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"
Nov 22 09:11:53 crc kubenswrapper[4693]: E1122 09:11:53.754739 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\": container with ID starting with 18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234 not found: ID does not exist" containerID="18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"
Nov 22 09:11:53 crc kubenswrapper[4693]: I1122 09:11:53.754762 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234"} err="failed to get container status \"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\": rpc error: code = NotFound desc = could not find container \"18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234\": container with ID starting with 18c074068d07b0400705e2bf8c48562158cde979eb949315210642176ea66234 not found: ID does not exist"
Nov 22 09:11:54 crc kubenswrapper[4693]: I1122 09:11:54.152635 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2fa68d41-61c5-4781-8984-add9804c1b4b" path="/var/lib/kubelet/pods/2fa68d41-61c5-4781-8984-add9804c1b4b/volumes"
Nov 22 09:11:54 crc kubenswrapper[4693]: I1122 09:11:54.606343 4693 generic.go:334] "Generic (PLEG): container finished" podID="ac92124f-4062-40d7-9d85-98436f3ea8a6" containerID="381811b9fe4e77b4c2f18b01aca0b558e167c1eccaaf4218b0c691caa92b8cb5" exitCode=0
Nov 22 09:11:54 crc kubenswrapper[4693]: I1122 09:11:54.606455 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerDied","Data":"381811b9fe4e77b4c2f18b01aca0b558e167c1eccaaf4218b0c691caa92b8cb5"}
Nov 22 09:11:54 crc kubenswrapper[4693]: I1122 09:11:54.606544 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"43f44dcc88114a36d99d47301269dff68ce5f47123933055d0f0ca763f00c4b6"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.615481 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"9b44473e4d0d2930827a9cb55d1e7a6f9d9613faeaacc4c412bc2a237efb0fda"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.616681 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"ff8d59abb4427261b7b51dbf0839a32d761b8e08f95217932d6145ce28c8fe0f"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.616743 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"48be34f4189f18e5febfb58e012554188d3f5d0c3f1d37f7487e3d0c8890eb45"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.616758 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"83bf80f70e524d80533539a5c188e0e95487570df795a639e483bffd71d5489e"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.616773 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"70f6a3e7eb7a6862472b5857d72797db4dca43eba7151d1160887bf72f49f452"}
Nov 22 09:11:55 crc kubenswrapper[4693]: I1122 09:11:55.616786 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"a24c07ed412f2b399bb327e5d03ab4e6aba06c2c3e69cacc4527674f4744a93e"}
Nov 22 09:11:57 crc kubenswrapper[4693]: I1122 09:11:57.627970 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"e6ae9e2b8f86db6c216a3fd15a241cd9ccb98c2d7012f58fd4df602046ba9116"}
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.641402 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" event={"ID":"ac92124f-4062-40d7-9d85-98436f3ea8a6","Type":"ContainerStarted","Data":"7d72ac784d038d0bddea544bd56157cc8c0c37f89a5759fa62b63ddf05378fcb"}
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.641832 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m"
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.641876 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m"
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.641950 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m"
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.663088 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m"
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.663757 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m"
Nov 22 09:11:59 crc kubenswrapper[4693]: I1122 09:11:59.665368 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" podStartSLOduration=6.665353245 podStartE2EDuration="6.665353245s" podCreationTimestamp="2025-11-22 09:11:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:11:59.664294558 +0000 UTC m=+515.806796849" watchObservedRunningTime="2025-11-22 09:11:59.665353245 +0000 UTC m=+515.807855536"
Nov 22 09:12:00 crc kubenswrapper[4693]: I1122 09:12:00.246726 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:12:00 crc kubenswrapper[4693]: I1122 09:12:00.246954 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:12:07 crc kubenswrapper[4693]: I1122 09:12:07.146583 4693 scope.go:117] "RemoveContainer" containerID="1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e"
Nov 22 09:12:07 crc kubenswrapper[4693]: E1122 09:12:07.147057 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-2s9rh_openshift-multus(9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7)\"" pod="openshift-multus/multus-2s9rh" podUID="9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7"
Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.443338 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"]
Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.444455 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.448394 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.449191 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"] Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.594780 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.594822 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z98fd\" (UniqueName: \"kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.594859 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.695649 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.695790 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z98fd\" (UniqueName: \"kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.695900 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.696153 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.696185 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.709827 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z98fd\" (UniqueName: \"kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd\") pod \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") " pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: I1122 09:12:21.755705 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: E1122 09:12:21.772138 4693 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(317f2d52fc7cedf50a4fae6888dd6c10ba9b54c211f52137a963cb12637ac684): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 09:12:21 crc kubenswrapper[4693]: E1122 09:12:21.772231 4693 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(317f2d52fc7cedf50a4fae6888dd6c10ba9b54c211f52137a963cb12637ac684): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: E1122 09:12:21.772303 4693 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(317f2d52fc7cedf50a4fae6888dd6c10ba9b54c211f52137a963cb12637ac684): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:21 crc kubenswrapper[4693]: E1122 09:12:21.772396 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace(d8efb62f-81ca-419d-aff7-56b948083857)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace(d8efb62f-81ca-419d-aff7-56b948083857)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(317f2d52fc7cedf50a4fae6888dd6c10ba9b54c211f52137a963cb12637ac684): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" podUID="d8efb62f-81ca-419d-aff7-56b948083857" Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.146817 4693 scope.go:117] "RemoveContainer" containerID="1173ab64803d2496086dccda3635309cbc051245ad9b4e5b7bd87bd5f093ba6e" Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.726612 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/2.log" Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.727137 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/1.log" Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.727188 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-2s9rh" event={"ID":"9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7","Type":"ContainerStarted","Data":"0ad53fc27ccc5efb63df6aea5303a8a75df25d29dc95a6bd9810f8619cfe958c"} Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.727206 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:22 crc kubenswrapper[4693]: I1122 09:12:22.727447 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:22 crc kubenswrapper[4693]: E1122 09:12:22.746934 4693 log.go:32] "RunPodSandbox from runtime service failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(d92d3ac5f05ad987c5e56f2ea03587a29954a15ce1f9506b56727dc6ef2adc72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 22 09:12:22 crc kubenswrapper[4693]: E1122 09:12:22.746994 4693 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(d92d3ac5f05ad987c5e56f2ea03587a29954a15ce1f9506b56727dc6ef2adc72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:22 crc kubenswrapper[4693]: E1122 09:12:22.747013 4693 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err="rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(d92d3ac5f05ad987c5e56f2ea03587a29954a15ce1f9506b56727dc6ef2adc72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" Nov 22 09:12:22 crc kubenswrapper[4693]: E1122 09:12:22.747058 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace(d8efb62f-81ca-419d-aff7-56b948083857)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace(d8efb62f-81ca-419d-aff7-56b948083857)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_openshift-marketplace_d8efb62f-81ca-419d-aff7-56b948083857_0(d92d3ac5f05ad987c5e56f2ea03587a29954a15ce1f9506b56727dc6ef2adc72): no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\"" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" podUID="d8efb62f-81ca-419d-aff7-56b948083857" Nov 22 09:12:23 crc kubenswrapper[4693]: I1122 09:12:23.715171 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-vqs7m" Nov 22 09:12:24 crc kubenswrapper[4693]: I1122 09:12:24.291628 4693 scope.go:117] "RemoveContainer" containerID="599b5dcfac2b3422b420c4ef551f6e1a96126b45504671612e436051b6e2c40d" Nov 22 09:12:24 crc kubenswrapper[4693]: I1122 09:12:24.303104 4693 scope.go:117] "RemoveContainer" containerID="ef7f3d2efdf6c7739853c586ee572b93f1cd51bb87a76f921e267e2dd8a01199" Nov 22 09:12:24 crc kubenswrapper[4693]: I1122 09:12:24.314434 4693 scope.go:117] "RemoveContainer" containerID="a6c6300b699597cb27041c139639f5c99e0f1ab84c27576f35f376f859afb0cc" Nov 22 09:12:24 crc kubenswrapper[4693]: I1122 09:12:24.322381 4693 scope.go:117] "RemoveContainer" containerID="6387d30ca84c096e122baad7b3c7382480866edc2a419af3a6b02824a5076d6f" Nov 22 09:12:24 crc kubenswrapper[4693]: I1122 09:12:24.742100 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-2s9rh_9ffcea9f-546f-4aa6-aa52-f1f2a96d4ac7/kube-multus/2.log" Nov 22 09:12:30 crc kubenswrapper[4693]: I1122 09:12:30.246633 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:12:30 crc kubenswrapper[4693]: I1122 09:12:30.247120 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:12:33 crc 
Nov 22 09:12:33 crc kubenswrapper[4693]: I1122 09:12:33.146305    4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"
Nov 22 09:12:33 crc kubenswrapper[4693]: I1122 09:12:33.146888    4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"
Nov 22 09:12:33 crc kubenswrapper[4693]: I1122 09:12:33.467475    4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"]
Nov 22 09:12:33 crc kubenswrapper[4693]: I1122 09:12:33.776290    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerStarted","Data":"bcf0672d98aa125be41c9c32e4cf77b19444c8f441a36b7749e78b67ca510b8e"}
Nov 22 09:12:33 crc kubenswrapper[4693]: I1122 09:12:33.776342    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerStarted","Data":"90f4303f55ceabe47c2856dcb3131e7f8a232ea2c89b4befa25e6c41cfaeff08"}
Nov 22 09:12:34 crc kubenswrapper[4693]: I1122 09:12:34.780876    4693 generic.go:334] "Generic (PLEG): container finished" podID="d8efb62f-81ca-419d-aff7-56b948083857" containerID="bcf0672d98aa125be41c9c32e4cf77b19444c8f441a36b7749e78b67ca510b8e" exitCode=0
Nov 22 09:12:34 crc kubenswrapper[4693]: I1122 09:12:34.780914    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerDied","Data":"bcf0672d98aa125be41c9c32e4cf77b19444c8f441a36b7749e78b67ca510b8e"}
Nov 22 09:12:36 crc kubenswrapper[4693]: I1122 09:12:36.789427    4693 generic.go:334] "Generic (PLEG): container finished" podID="d8efb62f-81ca-419d-aff7-56b948083857" containerID="ee75d13edb76272564cbcb25c2571115f9928e3e9d14b184e06cb3ed316f73b0" exitCode=0
Nov 22 09:12:36 crc kubenswrapper[4693]: I1122 09:12:36.789486    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerDied","Data":"ee75d13edb76272564cbcb25c2571115f9928e3e9d14b184e06cb3ed316f73b0"}
Nov 22 09:12:37 crc kubenswrapper[4693]: I1122 09:12:37.796057    4693 generic.go:334] "Generic (PLEG): container finished" podID="d8efb62f-81ca-419d-aff7-56b948083857" containerID="8ee2774468d11f6212e16c5c80696f2c4c1255da16a40b526e08d6183d1817eb" exitCode=0
Nov 22 09:12:37 crc kubenswrapper[4693]: I1122 09:12:37.796121    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerDied","Data":"8ee2774468d11f6212e16c5c80696f2c4c1255da16a40b526e08d6183d1817eb"}
Nov 22 09:12:38 crc kubenswrapper[4693]: I1122 09:12:38.951127    4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.053822    4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util\") pod \"d8efb62f-81ca-419d-aff7-56b948083857\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") "
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.053890    4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle\") pod \"d8efb62f-81ca-419d-aff7-56b948083857\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") "
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.053955    4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z98fd\" (UniqueName: \"kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd\") pod \"d8efb62f-81ca-419d-aff7-56b948083857\" (UID: \"d8efb62f-81ca-419d-aff7-56b948083857\") "
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.054280    4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle" (OuterVolumeSpecName: "bundle") pod "d8efb62f-81ca-419d-aff7-56b948083857" (UID: "d8efb62f-81ca-419d-aff7-56b948083857"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.058269    4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd" (OuterVolumeSpecName: "kube-api-access-z98fd") pod "d8efb62f-81ca-419d-aff7-56b948083857" (UID: "d8efb62f-81ca-419d-aff7-56b948083857"). InnerVolumeSpecName "kube-api-access-z98fd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.154668    4693 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.154691    4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z98fd\" (UniqueName: \"kubernetes.io/projected/d8efb62f-81ca-419d-aff7-56b948083857-kube-api-access-z98fd\") on node \"crc\" DevicePath \"\""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.223536    4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util" (OuterVolumeSpecName: "util") pod "d8efb62f-81ca-419d-aff7-56b948083857" (UID: "d8efb62f-81ca-419d-aff7-56b948083857"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.255221    4693 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d8efb62f-81ca-419d-aff7-56b948083857-util\") on node \"crc\" DevicePath \"\""
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.804939    4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq" event={"ID":"d8efb62f-81ca-419d-aff7-56b948083857","Type":"ContainerDied","Data":"90f4303f55ceabe47c2856dcb3131e7f8a232ea2c89b4befa25e6c41cfaeff08"}
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.804975    4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90f4303f55ceabe47c2856dcb3131e7f8a232ea2c89b4befa25e6c41cfaeff08"
Nov 22 09:12:39 crc kubenswrapper[4693]: I1122 09:12:39.804978    4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq"
Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.971922    4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-htlwb"]
Nov 22 09:12:42 crc kubenswrapper[4693]: E1122 09:12:42.972291    4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="extract"
Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.972302    4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="extract"
Nov 22 09:12:42 crc kubenswrapper[4693]: E1122 09:12:42.972318    4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="util"
Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.972323    4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="util"
Nov 22 09:12:42 crc kubenswrapper[4693]: E1122 09:12:42.972332    4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="pull"
Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.972338    4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="pull"
Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.972416    4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8efb62f-81ca-419d-aff7-56b948083857" containerName="extract"
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.974201 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.974253 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-8dvmp" Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.974548 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.979718 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-htlwb"] Nov 22 09:12:42 crc kubenswrapper[4693]: I1122 09:12:42.993018 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6b4t\" (UniqueName: \"kubernetes.io/projected/ff3958b0-cd30-4470-af03-214de7183eca-kube-api-access-f6b4t\") pod \"nmstate-operator-557fdffb88-htlwb\" (UID: \"ff3958b0-cd30-4470-af03-214de7183eca\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" Nov 22 09:12:43 crc kubenswrapper[4693]: I1122 09:12:43.093641 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6b4t\" (UniqueName: \"kubernetes.io/projected/ff3958b0-cd30-4470-af03-214de7183eca-kube-api-access-f6b4t\") pod \"nmstate-operator-557fdffb88-htlwb\" (UID: \"ff3958b0-cd30-4470-af03-214de7183eca\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" Nov 22 09:12:43 crc kubenswrapper[4693]: I1122 09:12:43.108024 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6b4t\" (UniqueName: \"kubernetes.io/projected/ff3958b0-cd30-4470-af03-214de7183eca-kube-api-access-f6b4t\") pod \"nmstate-operator-557fdffb88-htlwb\" (UID: \"ff3958b0-cd30-4470-af03-214de7183eca\") " pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" Nov 22 09:12:43 crc kubenswrapper[4693]: I1122 09:12:43.284444 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" Nov 22 09:12:43 crc kubenswrapper[4693]: I1122 09:12:43.626699 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-557fdffb88-htlwb"] Nov 22 09:12:43 crc kubenswrapper[4693]: I1122 09:12:43.820100 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" event={"ID":"ff3958b0-cd30-4470-af03-214de7183eca","Type":"ContainerStarted","Data":"8a105a5909a543ff93b83801cf49129e03d3ba3b3d1b6234e3d9fffe521b8f47"} Nov 22 09:12:45 crc kubenswrapper[4693]: I1122 09:12:45.828662 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" event={"ID":"ff3958b0-cd30-4470-af03-214de7183eca","Type":"ContainerStarted","Data":"daaf9e1ad9b3beaf119697969860e4a463a81c757625ad40bbf7339fb7f6973c"} Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.819250 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-557fdffb88-htlwb" podStartSLOduration=8.080423427 podStartE2EDuration="9.8192357s" podCreationTimestamp="2025-11-22 09:12:42 +0000 UTC" firstStartedPulling="2025-11-22 09:12:43.634785405 +0000 UTC m=+559.777287696" lastFinishedPulling="2025-11-22 09:12:45.373597678 +0000 UTC m=+561.516099969" observedRunningTime="2025-11-22 09:12:45.850265105 +0000 UTC m=+561.992767395" watchObservedRunningTime="2025-11-22 09:12:51.8192357 +0000 UTC m=+567.961737992" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.820652 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.821473 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.824312 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.824878 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.830464 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-j7mnr" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.830464 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.838203 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-5vqxd"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.838745 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.841657 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.845348 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.917177 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.917870 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.919190 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.919342 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.919753 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-x8nlx" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.923390 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq"] Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.979996 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e808ce78-c21f-414a-84cc-0f9b6e6154aa-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980182 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rlgvf\" (UniqueName: \"kubernetes.io/projected/cebd96db-0d01-4c74-a89f-d07b10c6fab8-kube-api-access-rlgvf\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980260 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-dbus-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980372 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gjvp9\" (UniqueName: \"kubernetes.io/projected/9c890510-4900-4b57-a97e-c15267309d74-kube-api-access-gjvp9\") pod \"nmstate-metrics-5dcf9c57c5-rkrtz\" (UID: \"9c890510-4900-4b57-a97e-c15267309d74\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980436 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-ovs-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " 
pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980553 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gv9w6\" (UniqueName: \"kubernetes.io/projected/e808ce78-c21f-414a-84cc-0f9b6e6154aa-kube-api-access-gv9w6\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:51 crc kubenswrapper[4693]: I1122 09:12:51.980617 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-nmstate-lock\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.070360 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-58f486b648-m2554"] Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.070903 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081471 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081674 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gv9w6\" (UniqueName: \"kubernetes.io/projected/e808ce78-c21f-414a-84cc-0f9b6e6154aa-kube-api-access-gv9w6\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081698 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-nmstate-lock\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081718 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081734 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e808ce78-c21f-414a-84cc-0f9b6e6154aa-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081759 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rlgvf\" (UniqueName: 
\"kubernetes.io/projected/cebd96db-0d01-4c74-a89f-d07b10c6fab8-kube-api-access-rlgvf\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081773 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-dbus-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081798 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q74l6\" (UniqueName: \"kubernetes.io/projected/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-kube-api-access-q74l6\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081816 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gjvp9\" (UniqueName: \"kubernetes.io/projected/9c890510-4900-4b57-a97e-c15267309d74-kube-api-access-gjvp9\") pod \"nmstate-metrics-5dcf9c57c5-rkrtz\" (UID: \"9c890510-4900-4b57-a97e-c15267309d74\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081831 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-ovs-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081922 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-ovs-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.081772 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-nmstate-lock\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.082169 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/cebd96db-0d01-4c74-a89f-d07b10c6fab8-dbus-socket\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.087027 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/e808ce78-c21f-414a-84cc-0f9b6e6154aa-tls-key-pair\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.100156 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rlgvf\" (UniqueName: 
\"kubernetes.io/projected/cebd96db-0d01-4c74-a89f-d07b10c6fab8-kube-api-access-rlgvf\") pod \"nmstate-handler-5vqxd\" (UID: \"cebd96db-0d01-4c74-a89f-d07b10c6fab8\") " pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.110628 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gv9w6\" (UniqueName: \"kubernetes.io/projected/e808ce78-c21f-414a-84cc-0f9b6e6154aa-kube-api-access-gv9w6\") pod \"nmstate-webhook-6b89b748d8-w59dj\" (UID: \"e808ce78-c21f-414a-84cc-0f9b6e6154aa\") " pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.112505 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gjvp9\" (UniqueName: \"kubernetes.io/projected/9c890510-4900-4b57-a97e-c15267309d74-kube-api-access-gjvp9\") pod \"nmstate-metrics-5dcf9c57c5-rkrtz\" (UID: \"9c890510-4900-4b57-a97e-c15267309d74\") " pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.134247 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.140012 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58f486b648-m2554"] Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.142605 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.150907 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183227 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183263 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-service-ca\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183290 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qmjcm\" (UniqueName: \"kubernetes.io/projected/f4d0de16-0203-4b12-a656-0d5d141b661d-kube-api-access-qmjcm\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183305 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-trusted-ca-bundle\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183323 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-oauth-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183341 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183352 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-oauth-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183380 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q74l6\" (UniqueName: \"kubernetes.io/projected/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-kube-api-access-q74l6\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183402 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-console-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.183429 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.184455 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-nginx-conf\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.186334 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-plugin-serving-cert\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: W1122 09:12:52.186964 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcebd96db_0d01_4c74_a89f_d07b10c6fab8.slice/crio-c2329021b7d7fe23bf8af953ff3e14a1f9264d0fa8342135e021b2d86fdfc4eb WatchSource:0}: 
Error finding container c2329021b7d7fe23bf8af953ff3e14a1f9264d0fa8342135e021b2d86fdfc4eb: Status 404 returned error can't find the container with id c2329021b7d7fe23bf8af953ff3e14a1f9264d0fa8342135e021b2d86fdfc4eb Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.197479 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q74l6\" (UniqueName: \"kubernetes.io/projected/b1a9c594-5414-44e7-a2ae-e1bd9fca29a3-kube-api-access-q74l6\") pod \"nmstate-console-plugin-5874bd7bc5-l4gbq\" (UID: \"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3\") " pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.229481 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.284615 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-console-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285367 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-service-ca\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285407 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qmjcm\" (UniqueName: \"kubernetes.io/projected/f4d0de16-0203-4b12-a656-0d5d141b661d-kube-api-access-qmjcm\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285423 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-trusted-ca-bundle\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285452 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-oauth-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285492 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285507 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-oauth-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " 
pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.285710 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-console-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.286259 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-oauth-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.287226 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-trusted-ca-bundle\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.287227 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f4d0de16-0203-4b12-a656-0d5d141b661d-service-ca\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.288945 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-serving-cert\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.289221 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f4d0de16-0203-4b12-a656-0d5d141b661d-console-oauth-config\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.298661 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qmjcm\" (UniqueName: \"kubernetes.io/projected/f4d0de16-0203-4b12-a656-0d5d141b661d-kube-api-access-qmjcm\") pod \"console-58f486b648-m2554\" (UID: \"f4d0de16-0203-4b12-a656-0d5d141b661d\") " pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.381256 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.486987 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz"] Nov 22 09:12:52 crc kubenswrapper[4693]: W1122 09:12:52.493222 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9c890510_4900_4b57_a97e_c15267309d74.slice/crio-12a03b85bcd148b8aeb38c357cf352b20527925c2a1eedad3676b3edd514e0c3 WatchSource:0}: Error finding container 12a03b85bcd148b8aeb38c357cf352b20527925c2a1eedad3676b3edd514e0c3: Status 404 returned error can't find the container with id 12a03b85bcd148b8aeb38c357cf352b20527925c2a1eedad3676b3edd514e0c3 Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.515913 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj"] Nov 22 09:12:52 crc kubenswrapper[4693]: W1122 09:12:52.517875 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode808ce78_c21f_414a_84cc_0f9b6e6154aa.slice/crio-6bfe6250ddbf4b70da0549c298487c8c6a4f74ae40121eb86394a3c5846b7d19 WatchSource:0}: Error finding container 6bfe6250ddbf4b70da0549c298487c8c6a4f74ae40121eb86394a3c5846b7d19: Status 404 returned error can't find the container with id 6bfe6250ddbf4b70da0549c298487c8c6a4f74ae40121eb86394a3c5846b7d19 Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.562879 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq"] Nov 22 09:12:52 crc kubenswrapper[4693]: W1122 09:12:52.564525 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb1a9c594_5414_44e7_a2ae_e1bd9fca29a3.slice/crio-67e799e0ae1ec3d61fb5955032f832332b7e3e038afa03376c1f1bfc37ee2bc7 WatchSource:0}: Error finding container 67e799e0ae1ec3d61fb5955032f832332b7e3e038afa03376c1f1bfc37ee2bc7: Status 404 returned error can't find the container with id 67e799e0ae1ec3d61fb5955032f832332b7e3e038afa03376c1f1bfc37ee2bc7 Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.699648 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-58f486b648-m2554"] Nov 22 09:12:52 crc kubenswrapper[4693]: W1122 09:12:52.702255 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4d0de16_0203_4b12_a656_0d5d141b661d.slice/crio-a14d978b70b8f7025c5d7d218bc066a2f5d75843b458a18caf361fc48fbe2ecd WatchSource:0}: Error finding container a14d978b70b8f7025c5d7d218bc066a2f5d75843b458a18caf361fc48fbe2ecd: Status 404 returned error can't find the container with id a14d978b70b8f7025c5d7d218bc066a2f5d75843b458a18caf361fc48fbe2ecd Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.852273 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" event={"ID":"9c890510-4900-4b57-a97e-c15267309d74","Type":"ContainerStarted","Data":"12a03b85bcd148b8aeb38c357cf352b20527925c2a1eedad3676b3edd514e0c3"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.853179 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" 
event={"ID":"e808ce78-c21f-414a-84cc-0f9b6e6154aa","Type":"ContainerStarted","Data":"6bfe6250ddbf4b70da0549c298487c8c6a4f74ae40121eb86394a3c5846b7d19"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.854220 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58f486b648-m2554" event={"ID":"f4d0de16-0203-4b12-a656-0d5d141b661d","Type":"ContainerStarted","Data":"d8d96a346e4c168bc10a64cc85401da1573cfe1cb24abcfd0aba04de64e93470"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.854256 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-58f486b648-m2554" event={"ID":"f4d0de16-0203-4b12-a656-0d5d141b661d","Type":"ContainerStarted","Data":"a14d978b70b8f7025c5d7d218bc066a2f5d75843b458a18caf361fc48fbe2ecd"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.855323 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-5vqxd" event={"ID":"cebd96db-0d01-4c74-a89f-d07b10c6fab8","Type":"ContainerStarted","Data":"c2329021b7d7fe23bf8af953ff3e14a1f9264d0fa8342135e021b2d86fdfc4eb"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.856282 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" event={"ID":"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3","Type":"ContainerStarted","Data":"67e799e0ae1ec3d61fb5955032f832332b7e3e038afa03376c1f1bfc37ee2bc7"} Nov 22 09:12:52 crc kubenswrapper[4693]: I1122 09:12:52.866024 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-58f486b648-m2554" podStartSLOduration=0.866009606 podStartE2EDuration="866.009606ms" podCreationTimestamp="2025-11-22 09:12:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:12:52.864674759 +0000 UTC m=+569.007177050" watchObservedRunningTime="2025-11-22 09:12:52.866009606 +0000 UTC m=+569.008511898" Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.870907 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" event={"ID":"9c890510-4900-4b57-a97e-c15267309d74","Type":"ContainerStarted","Data":"ea823dec2c1f024fa9a362a2b1ca3f5b790b7031c3f0cb8b568139be0cc5e098"} Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.872405 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" event={"ID":"e808ce78-c21f-414a-84cc-0f9b6e6154aa","Type":"ContainerStarted","Data":"fb6b4d7047fe3700dd0d4c6f979c0ab7076564fde868b4cd4c3c085fff1c7331"} Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.872514 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.874795 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-5vqxd" event={"ID":"cebd96db-0d01-4c74-a89f-d07b10c6fab8","Type":"ContainerStarted","Data":"65e8bc437d53e1563165747d122f66d92cd070d3f058e9549160b344f3b7af33"} Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.874888 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.875870 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" 
event={"ID":"b1a9c594-5414-44e7-a2ae-e1bd9fca29a3","Type":"ContainerStarted","Data":"5209ee6711a63a8886c0b776f7fe9b760a736b9ddf1e40ef71c7c37f01b86577"} Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.883524 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" podStartSLOduration=2.246253703 podStartE2EDuration="4.883513532s" podCreationTimestamp="2025-11-22 09:12:51 +0000 UTC" firstStartedPulling="2025-11-22 09:12:52.519793955 +0000 UTC m=+568.662296245" lastFinishedPulling="2025-11-22 09:12:55.157053782 +0000 UTC m=+571.299556074" observedRunningTime="2025-11-22 09:12:55.88202171 +0000 UTC m=+572.024524001" watchObservedRunningTime="2025-11-22 09:12:55.883513532 +0000 UTC m=+572.026015823" Nov 22 09:12:55 crc kubenswrapper[4693]: I1122 09:12:55.891662 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-5vqxd" podStartSLOduration=1.9167757010000002 podStartE2EDuration="4.891654439s" podCreationTimestamp="2025-11-22 09:12:51 +0000 UTC" firstStartedPulling="2025-11-22 09:12:52.189356659 +0000 UTC m=+568.331858949" lastFinishedPulling="2025-11-22 09:12:55.164235396 +0000 UTC m=+571.306737687" observedRunningTime="2025-11-22 09:12:55.890783622 +0000 UTC m=+572.033285913" watchObservedRunningTime="2025-11-22 09:12:55.891654439 +0000 UTC m=+572.034156729" Nov 22 09:12:57 crc kubenswrapper[4693]: I1122 09:12:57.886054 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" event={"ID":"9c890510-4900-4b57-a97e-c15267309d74","Type":"ContainerStarted","Data":"c47e22797e24b495da4285e14f6ba42c91085f4d0662d9942879764bf60ae9de"} Nov 22 09:12:57 crc kubenswrapper[4693]: I1122 09:12:57.897002 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-5dcf9c57c5-rkrtz" podStartSLOduration=2.437740228 podStartE2EDuration="6.896989449s" podCreationTimestamp="2025-11-22 09:12:51 +0000 UTC" firstStartedPulling="2025-11-22 09:12:52.495135834 +0000 UTC m=+568.637638126" lastFinishedPulling="2025-11-22 09:12:56.954385056 +0000 UTC m=+573.096887347" observedRunningTime="2025-11-22 09:12:57.896311153 +0000 UTC m=+574.038813445" watchObservedRunningTime="2025-11-22 09:12:57.896989449 +0000 UTC m=+574.039491739" Nov 22 09:12:57 crc kubenswrapper[4693]: I1122 09:12:57.897904 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-5874bd7bc5-l4gbq" podStartSLOduration=4.307304148 podStartE2EDuration="6.897897965s" podCreationTimestamp="2025-11-22 09:12:51 +0000 UTC" firstStartedPulling="2025-11-22 09:12:52.566279586 +0000 UTC m=+568.708781877" lastFinishedPulling="2025-11-22 09:12:55.156873403 +0000 UTC m=+571.299375694" observedRunningTime="2025-11-22 09:12:55.906551298 +0000 UTC m=+572.049053589" watchObservedRunningTime="2025-11-22 09:12:57.897897965 +0000 UTC m=+574.040400256" Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.246647 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.246901 4693 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.246937 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.247300 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.247347 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96" gracePeriod=600 Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.900955 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96" exitCode=0 Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.901014 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96"} Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.901234 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7"} Nov 22 09:13:00 crc kubenswrapper[4693]: I1122 09:13:00.901252 4693 scope.go:117] "RemoveContainer" containerID="8e9b2098a9f7737f670f889afec51d85ba47f7877ce014bd4df47229fe7ecbfd" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.164803 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-5vqxd" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.381942 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.382255 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.385255 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.916209 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-58f486b648-m2554" Nov 22 09:13:02 crc kubenswrapper[4693]: I1122 09:13:02.948113 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"] Nov 22 09:13:12 crc kubenswrapper[4693]: I1122 
09:13:12.153391 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-6b89b748d8-w59dj" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.369798 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg"] Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.370991 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.372229 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.375245 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg"] Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.479604 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4w4tz\" (UniqueName: \"kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.479760 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.479833 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.580608 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4w4tz\" (UniqueName: \"kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.580689 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.580713 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.581130 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.581242 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.595493 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4w4tz\" (UniqueName: \"kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz\") pod \"e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:21 crc kubenswrapper[4693]: I1122 09:13:21.683386 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:22 crc kubenswrapper[4693]: I1122 09:13:22.010757 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg"] Nov 22 09:13:22 crc kubenswrapper[4693]: I1122 09:13:22.991670 4693 generic.go:334] "Generic (PLEG): container finished" podID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerID="69d5177d2dd7bc7f9a66246fede91deca1dfc51f03d9b86b2e3e985ed10c2c16" exitCode=0 Nov 22 09:13:22 crc kubenswrapper[4693]: I1122 09:13:22.991714 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" event={"ID":"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7","Type":"ContainerDied","Data":"69d5177d2dd7bc7f9a66246fede91deca1dfc51f03d9b86b2e3e985ed10c2c16"} Nov 22 09:13:22 crc kubenswrapper[4693]: I1122 09:13:22.991760 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" event={"ID":"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7","Type":"ContainerStarted","Data":"5124546b68b2da88c8735730ec0570bc1e563221bcfa70c5942d812946e76fe6"} Nov 22 09:13:25 crc kubenswrapper[4693]: I1122 09:13:25.001237 4693 generic.go:334] "Generic (PLEG): container finished" podID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerID="4ab63722857e82d978fb243ed47873d71691ddee96e1865d9692cd8d73a27085" exitCode=0 Nov 22 09:13:25 crc kubenswrapper[4693]: I1122 09:13:25.001339 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" 
event={"ID":"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7","Type":"ContainerDied","Data":"4ab63722857e82d978fb243ed47873d71691ddee96e1865d9692cd8d73a27085"} Nov 22 09:13:26 crc kubenswrapper[4693]: I1122 09:13:26.006533 4693 generic.go:334] "Generic (PLEG): container finished" podID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerID="61d9c1581ddac8c02a30af91e33575ac25e3056a220ad5e71b0b6b12f47ea44a" exitCode=0 Nov 22 09:13:26 crc kubenswrapper[4693]: I1122 09:13:26.006596 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" event={"ID":"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7","Type":"ContainerDied","Data":"61d9c1581ddac8c02a30af91e33575ac25e3056a220ad5e71b0b6b12f47ea44a"} Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.181733 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.336247 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle\") pod \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.336297 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util\") pod \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.336336 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4w4tz\" (UniqueName: \"kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz\") pod \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\" (UID: \"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7\") " Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.337356 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle" (OuterVolumeSpecName: "bundle") pod "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" (UID: "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.340997 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz" (OuterVolumeSpecName: "kube-api-access-4w4tz") pod "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" (UID: "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7"). InnerVolumeSpecName "kube-api-access-4w4tz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.343857 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util" (OuterVolumeSpecName: "util") pod "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" (UID: "650bf5b9-0977-4b28-bd9d-8d2518dfc4d7"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.438341 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4w4tz\" (UniqueName: \"kubernetes.io/projected/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-kube-api-access-4w4tz\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.438369 4693 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.438379 4693 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/650bf5b9-0977-4b28-bd9d-8d2518dfc4d7-util\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:27 crc kubenswrapper[4693]: I1122 09:13:27.974060 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-m5ldq" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" containerID="cri-o://e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e" gracePeriod=15 Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.015858 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" event={"ID":"650bf5b9-0977-4b28-bd9d-8d2518dfc4d7","Type":"ContainerDied","Data":"5124546b68b2da88c8735730ec0570bc1e563221bcfa70c5942d812946e76fe6"} Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.015891 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5124546b68b2da88c8735730ec0570bc1e563221bcfa70c5942d812946e76fe6" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.015923 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.243589 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m5ldq_8cd5b74f-7a92-4b0f-9846-e9afd22fc091/console/0.log" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.243662 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347262 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347302 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347325 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347343 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347359 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347384 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4wtbn\" (UniqueName: \"kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.347458 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config\") pod \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\" (UID: \"8cd5b74f-7a92-4b0f-9846-e9afd22fc091\") " Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.348031 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.348063 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config" (OuterVolumeSpecName: "console-config") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.348356 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca" (OuterVolumeSpecName: "service-ca") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.348368 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.350782 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.351003 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.351155 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn" (OuterVolumeSpecName: "kube-api-access-4wtbn") pod "8cd5b74f-7a92-4b0f-9846-e9afd22fc091" (UID: "8cd5b74f-7a92-4b0f-9846-e9afd22fc091"). InnerVolumeSpecName "kube-api-access-4wtbn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448711 4693 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448735 4693 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448746 4693 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448754 4693 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448762 4693 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-service-ca\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448770 4693 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:28 crc kubenswrapper[4693]: I1122 09:13:28.448777 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4wtbn\" (UniqueName: \"kubernetes.io/projected/8cd5b74f-7a92-4b0f-9846-e9afd22fc091-kube-api-access-4wtbn\") on node \"crc\" DevicePath \"\"" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020355 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-m5ldq_8cd5b74f-7a92-4b0f-9846-e9afd22fc091/console/0.log" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020594 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerID="e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e" exitCode=2 Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020622 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5ldq" event={"ID":"8cd5b74f-7a92-4b0f-9846-e9afd22fc091","Type":"ContainerDied","Data":"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e"} Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020650 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-m5ldq" event={"ID":"8cd5b74f-7a92-4b0f-9846-e9afd22fc091","Type":"ContainerDied","Data":"3bda24df55ea83b4a785b18dbda933d1af9095609ba6bcc04a364d5ad4ab1cdb"} Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020666 4693 scope.go:117] "RemoveContainer" containerID="e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.020667 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-m5ldq" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.033802 4693 scope.go:117] "RemoveContainer" containerID="e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e" Nov 22 09:13:29 crc kubenswrapper[4693]: E1122 09:13:29.034096 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e\": container with ID starting with e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e not found: ID does not exist" containerID="e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.034128 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e"} err="failed to get container status \"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e\": rpc error: code = NotFound desc = could not find container \"e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e\": container with ID starting with e7edf2b47f9b0bde20c92cdde51544d33e9958498c64a864ae8e09a24087ff6e not found: ID does not exist" Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.038309 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"] Nov 22 09:13:29 crc kubenswrapper[4693]: I1122 09:13:29.040437 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-m5ldq"] Nov 22 09:13:30 crc kubenswrapper[4693]: I1122 09:13:30.151600 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" path="/var/lib/kubelet/pods/8cd5b74f-7a92-4b0f-9846-e9afd22fc091/volumes" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.021945 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn"] Nov 22 09:13:38 crc kubenswrapper[4693]: E1122 09:13:38.022695 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="extract" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022707 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="extract" Nov 22 09:13:38 crc kubenswrapper[4693]: E1122 09:13:38.022726 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="util" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022732 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="util" Nov 22 09:13:38 crc kubenswrapper[4693]: E1122 09:13:38.022743 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022749 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" Nov 22 09:13:38 crc kubenswrapper[4693]: E1122 09:13:38.022768 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="pull" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022780 4693 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="pull" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022948 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd5b74f-7a92-4b0f-9846-e9afd22fc091" containerName="console" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.022965 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="650bf5b9-0977-4b28-bd9d-8d2518dfc4d7" containerName="extract" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.023485 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.025699 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.025716 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.025893 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.026042 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn"] Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.027305 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.027575 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-r2hz7" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.155406 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-apiservice-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.155518 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8dxr\" (UniqueName: \"kubernetes.io/projected/04bf9ebb-5541-4144-879a-1ac25382249d-kube-api-access-n8dxr\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.155566 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-webhook-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.256443 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-webhook-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " 
pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.256722 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-apiservice-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.256763 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8dxr\" (UniqueName: \"kubernetes.io/projected/04bf9ebb-5541-4144-879a-1ac25382249d-kube-api-access-n8dxr\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.261121 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-apiservice-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.261133 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/04bf9ebb-5541-4144-879a-1ac25382249d-webhook-cert\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.269778 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8dxr\" (UniqueName: \"kubernetes.io/projected/04bf9ebb-5541-4144-879a-1ac25382249d-kube-api-access-n8dxr\") pod \"metallb-operator-controller-manager-688d9f6dd-24fdn\" (UID: \"04bf9ebb-5541-4144-879a-1ac25382249d\") " pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.320587 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-df9cf6-744f8"] Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.321169 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.323873 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-q5klq" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.323899 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.324589 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.331646 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-df9cf6-744f8"] Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.337299 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.462082 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-webhook-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.462360 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbm9b\" (UniqueName: \"kubernetes.io/projected/d60d2419-520e-4156-9d05-3b174542f80e-kube-api-access-vbm9b\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.462389 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-apiservice-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.540646 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn"] Nov 22 09:13:38 crc kubenswrapper[4693]: W1122 09:13:38.546700 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04bf9ebb_5541_4144_879a_1ac25382249d.slice/crio-2e5c6429158d2f1ea71e8596bda88981a322f99b411abeb02ef95ab1a048dce0 WatchSource:0}: Error finding container 2e5c6429158d2f1ea71e8596bda88981a322f99b411abeb02ef95ab1a048dce0: Status 404 returned error can't find the container with id 2e5c6429158d2f1ea71e8596bda88981a322f99b411abeb02ef95ab1a048dce0 Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.563355 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbm9b\" (UniqueName: \"kubernetes.io/projected/d60d2419-520e-4156-9d05-3b174542f80e-kube-api-access-vbm9b\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.563398 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-apiservice-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.563425 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-webhook-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.568811 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-webhook-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.571826 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d60d2419-520e-4156-9d05-3b174542f80e-apiservice-cert\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.576959 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbm9b\" (UniqueName: \"kubernetes.io/projected/d60d2419-520e-4156-9d05-3b174542f80e-kube-api-access-vbm9b\") pod \"metallb-operator-webhook-server-df9cf6-744f8\" (UID: \"d60d2419-520e-4156-9d05-3b174542f80e\") " pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.634973 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:38 crc kubenswrapper[4693]: I1122 09:13:38.971069 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-df9cf6-744f8"] Nov 22 09:13:39 crc kubenswrapper[4693]: I1122 09:13:39.056943 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" event={"ID":"04bf9ebb-5541-4144-879a-1ac25382249d","Type":"ContainerStarted","Data":"2e5c6429158d2f1ea71e8596bda88981a322f99b411abeb02ef95ab1a048dce0"} Nov 22 09:13:39 crc kubenswrapper[4693]: I1122 09:13:39.057989 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" event={"ID":"d60d2419-520e-4156-9d05-3b174542f80e","Type":"ContainerStarted","Data":"d484e120710efd092c8bb02baf7a99a2d6e94e7181376000343ed14d2e48069b"} Nov 22 09:13:42 crc kubenswrapper[4693]: I1122 09:13:42.076755 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" event={"ID":"04bf9ebb-5541-4144-879a-1ac25382249d","Type":"ContainerStarted","Data":"436ab62b021fb58a1bb6e9d1a69e16e776d54e703aa5523cb375420e7f43e555"} Nov 22 09:13:42 crc kubenswrapper[4693]: I1122 09:13:42.077181 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:13:42 crc kubenswrapper[4693]: I1122 09:13:42.095426 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" podStartSLOduration=2.571941348 podStartE2EDuration="5.095408072s" podCreationTimestamp="2025-11-22 09:13:37 +0000 UTC" firstStartedPulling="2025-11-22 09:13:38.549998197 +0000 UTC m=+614.692500487" lastFinishedPulling="2025-11-22 09:13:41.07346492 +0000 UTC m=+617.215967211" observedRunningTime="2025-11-22 09:13:42.090455148 +0000 UTC m=+618.232957439" watchObservedRunningTime="2025-11-22 09:13:42.095408072 +0000 UTC m=+618.237910363" Nov 22 09:13:43 crc kubenswrapper[4693]: I1122 09:13:43.082732 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" event={"ID":"d60d2419-520e-4156-9d05-3b174542f80e","Type":"ContainerStarted","Data":"166288f4c179dda2baa1046dc51daeda56b07b23ed19e26faa1f48fdd4a7f098"} Nov 22 09:13:43 crc kubenswrapper[4693]: I1122 09:13:43.096359 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" podStartSLOduration=1.92922195 podStartE2EDuration="5.096343673s" podCreationTimestamp="2025-11-22 09:13:38 +0000 UTC" firstStartedPulling="2025-11-22 09:13:38.976383825 +0000 UTC m=+615.118886117" lastFinishedPulling="2025-11-22 09:13:42.143505548 +0000 UTC m=+618.286007840" observedRunningTime="2025-11-22 09:13:43.093725094 +0000 UTC m=+619.236227385" watchObservedRunningTime="2025-11-22 09:13:43.096343673 +0000 UTC m=+619.238845964" Nov 22 09:13:44 crc kubenswrapper[4693]: I1122 09:13:44.086570 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:13:58 crc kubenswrapper[4693]: I1122 09:13:58.640120 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-df9cf6-744f8" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.339888 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-688d9f6dd-24fdn" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.866723 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"] Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.867575 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.868909 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-rps7r"] Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.869698 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.869925 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-xrwxc" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.870811 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 crc kubenswrapper[4693]: W1122 09:14:18.871814 4693 reflector.go:561] object-"metallb-system"/"frr-startup": failed to list *v1.ConfigMap: configmaps "frr-startup" is forbidden: User "system:node:crc" cannot list resource "configmaps" in API group "" in the namespace "metallb-system": no relationship found between node 'crc' and this object Nov 22 09:14:18 crc kubenswrapper[4693]: E1122 09:14:18.871869 4693 reflector.go:158] "Unhandled Error" err="object-\"metallb-system\"/\"frr-startup\": Failed to watch *v1.ConfigMap: failed to list *v1.ConfigMap: configmaps \"frr-startup\" is forbidden: User \"system:node:crc\" cannot list resource \"configmaps\" in API group \"\" in the namespace \"metallb-system\": no relationship found between node 'crc' and this object" logger="UnhandledError" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.874514 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.878935 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"] Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902267 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-startup\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902345 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902379 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics-certs\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902398 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qgsb\" (UniqueName: \"kubernetes.io/projected/d3923021-6731-4289-a3ce-f78f990d6d61-kube-api-access-8qgsb\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902416 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-sockets\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902503 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-reloader\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:18 
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902555 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxb8w\" (UniqueName: \"kubernetes.io/projected/22957d7b-20a3-4b2d-af88-d0a93924eec8-kube-api-access-rxb8w\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.902573 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.931511 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-5gdzr"]
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.932805 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-5gdzr"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.934223 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-fqbqr"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.934470 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.934679 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.935492 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.938882 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6c7b4b5f48-j5p4z"]
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.940039 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-j5p4z"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.941080 4693 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret"
Nov 22 09:14:18 crc kubenswrapper[4693]: I1122 09:14:18.943546 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-j5p4z"]
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003272 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxb8w\" (UniqueName: \"kubernetes.io/projected/22957d7b-20a3-4b2d-af88-d0a93924eec8-kube-api-access-rxb8w\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003411 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003470 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-startup\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003569 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a9bc621d-a044-4e19-99d8-297dd534f390-metallb-excludel2\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003612 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgqpx\" (UniqueName: \"kubernetes.io/projected/a9bc621d-a044-4e19-99d8-297dd534f390-kube-api-access-bgqpx\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr"
Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.003638 4693 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003659 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.003753 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert podName:d3923021-6731-4289-a3ce-f78f990d6d61 nodeName:}" failed. No retries permitted until 2025-11-22 09:14:19.503726249 +0000 UTC m=+655.646228541 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert") pod "frr-k8s-webhook-server-6998585d5-dlqvh" (UID: "d3923021-6731-4289-a3ce-f78f990d6d61") : secret "frr-k8s-webhook-server-cert" not found
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003795 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003906 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics-certs\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003961 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qgsb\" (UniqueName: \"kubernetes.io/projected/d3923021-6731-4289-a3ce-f78f990d6d61-kube-api-access-8qgsb\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.003982 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-sockets\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004046 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7plgw\" (UniqueName: \"kubernetes.io/projected/a3aed62f-5abf-4446-9c61-1618025ddc52-kube-api-access-7plgw\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004079 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004114 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-metrics-certs\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004212 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-reloader\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r"
Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004277 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-cert\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z"
\"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-cert\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004339 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004376 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-conf\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004548 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-sockets\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004610 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-reloader\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.004693 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-conf\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.009154 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/22957d7b-20a3-4b2d-af88-d0a93924eec8-metrics-certs\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.018186 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxb8w\" (UniqueName: \"kubernetes.io/projected/22957d7b-20a3-4b2d-af88-d0a93924eec8-kube-api-access-rxb8w\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.025429 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qgsb\" (UniqueName: \"kubernetes.io/projected/d3923021-6731-4289-a3ce-f78f990d6d61-kube-api-access-8qgsb\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105756 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a9bc621d-a044-4e19-99d8-297dd534f390-metallb-excludel2\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105818 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgqpx\" (UniqueName: \"kubernetes.io/projected/a9bc621d-a044-4e19-99d8-297dd534f390-kube-api-access-bgqpx\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105839 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105898 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7plgw\" (UniqueName: \"kubernetes.io/projected/a3aed62f-5abf-4446-9c61-1618025ddc52-kube-api-access-7plgw\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105923 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-metrics-certs\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105938 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-cert\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.105953 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.106083 4693 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.106133 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist podName:a9bc621d-a044-4e19-99d8-297dd534f390 nodeName:}" failed. No retries permitted until 2025-11-22 09:14:19.606115756 +0000 UTC m=+655.748618046 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist") pod "speaker-5gdzr" (UID: "a9bc621d-a044-4e19-99d8-297dd534f390") : secret "metallb-memberlist" not found Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.106185 4693 secret.go:188] Couldn't get secret metallb-system/controller-certs-secret: secret "controller-certs-secret" not found Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.106395 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/a9bc621d-a044-4e19-99d8-297dd534f390-metallb-excludel2\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.106426 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs podName:a3aed62f-5abf-4446-9c61-1618025ddc52 nodeName:}" failed. No retries permitted until 2025-11-22 09:14:19.606386435 +0000 UTC m=+655.748888716 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs") pod "controller-6c7b4b5f48-j5p4z" (UID: "a3aed62f-5abf-4446-9c61-1618025ddc52") : secret "controller-certs-secret" not found Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.112142 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-metrics-certs\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.115189 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-cert\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.119567 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7plgw\" (UniqueName: \"kubernetes.io/projected/a3aed62f-5abf-4446-9c61-1618025ddc52-kube-api-access-7plgw\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.124994 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgqpx\" (UniqueName: \"kubernetes.io/projected/a9bc621d-a044-4e19-99d8-297dd534f390-kube-api-access-bgqpx\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.508153 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert\") pod \"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.511595 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/d3923021-6731-4289-a3ce-f78f990d6d61-cert\") pod 
\"frr-k8s-webhook-server-6998585d5-dlqvh\" (UID: \"d3923021-6731-4289-a3ce-f78f990d6d61\") " pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.609136 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.609218 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.609331 4693 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Nov 22 09:14:19 crc kubenswrapper[4693]: E1122 09:14:19.609422 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist podName:a9bc621d-a044-4e19-99d8-297dd534f390 nodeName:}" failed. No retries permitted until 2025-11-22 09:14:20.609402412 +0000 UTC m=+656.751904703 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist") pod "speaker-5gdzr" (UID: "a9bc621d-a044-4e19-99d8-297dd534f390") : secret "metallb-memberlist" not found Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.611561 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a3aed62f-5abf-4446-9c61-1618025ddc52-metrics-certs\") pod \"controller-6c7b4b5f48-j5p4z\" (UID: \"a3aed62f-5abf-4446-9c61-1618025ddc52\") " pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.780967 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.849959 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.887966 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Nov 22 09:14:19 crc kubenswrapper[4693]: I1122 09:14:19.895416 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/22957d7b-20a3-4b2d-af88-d0a93924eec8-frr-startup\") pod \"frr-k8s-rps7r\" (UID: \"22957d7b-20a3-4b2d-af88-d0a93924eec8\") " pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.086949 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.127459 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh"] Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.194288 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6c7b4b5f48-j5p4z"] Nov 22 09:14:20 crc kubenswrapper[4693]: W1122 09:14:20.199256 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda3aed62f_5abf_4446_9c61_1618025ddc52.slice/crio-087dc33a6def2e8a873c8da489b94527d9928cdca9d22ffd62ffd89b5983d300 WatchSource:0}: Error finding container 087dc33a6def2e8a873c8da489b94527d9928cdca9d22ffd62ffd89b5983d300: Status 404 returned error can't find the container with id 087dc33a6def2e8a873c8da489b94527d9928cdca9d22ffd62ffd89b5983d300 Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.221707 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"aa2a66f4d016dbf08142e999f36f20c7af84b3dd9167c9e965985c92b840bda4"} Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.222667 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-j5p4z" event={"ID":"a3aed62f-5abf-4446-9c61-1618025ddc52","Type":"ContainerStarted","Data":"087dc33a6def2e8a873c8da489b94527d9928cdca9d22ffd62ffd89b5983d300"} Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.223370 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" event={"ID":"d3923021-6731-4289-a3ce-f78f990d6d61","Type":"ContainerStarted","Data":"2d365a67c60c49467c1a8c4edafd751cfd8f2657036f1c83cc0e12ef73df49e2"} Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.633147 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.638188 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/a9bc621d-a044-4e19-99d8-297dd534f390-memberlist\") pod \"speaker-5gdzr\" (UID: \"a9bc621d-a044-4e19-99d8-297dd534f390\") " pod="metallb-system/speaker-5gdzr" Nov 22 09:14:20 crc kubenswrapper[4693]: I1122 09:14:20.743279 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-5gdzr" Nov 22 09:14:20 crc kubenswrapper[4693]: W1122 09:14:20.757428 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9bc621d_a044_4e19_99d8_297dd534f390.slice/crio-e1bf9f868580e5959d4458e33e949c7257b039db1c3ee2fd023981774dbe710b WatchSource:0}: Error finding container e1bf9f868580e5959d4458e33e949c7257b039db1c3ee2fd023981774dbe710b: Status 404 returned error can't find the container with id e1bf9f868580e5959d4458e33e949c7257b039db1c3ee2fd023981774dbe710b Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.231124 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5gdzr" event={"ID":"a9bc621d-a044-4e19-99d8-297dd534f390","Type":"ContainerStarted","Data":"f9e36ab7cfbeec51cd83918756e95876e9ba136ca13bc199b83f1df0c3cedba5"} Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.231524 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5gdzr" event={"ID":"a9bc621d-a044-4e19-99d8-297dd534f390","Type":"ContainerStarted","Data":"8b593db526f4c515953e08743928cbd558a6ab23305abe2efdf9675cae2ff309"} Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.231537 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-5gdzr" event={"ID":"a9bc621d-a044-4e19-99d8-297dd534f390","Type":"ContainerStarted","Data":"e1bf9f868580e5959d4458e33e949c7257b039db1c3ee2fd023981774dbe710b"} Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.231740 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-5gdzr" Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.233750 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-j5p4z" event={"ID":"a3aed62f-5abf-4446-9c61-1618025ddc52","Type":"ContainerStarted","Data":"c0ea37ea72f4bdc3ca6fd3122d852ee7a126d749405b809b0bad202df85a3d9b"} Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.233899 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6c7b4b5f48-j5p4z" event={"ID":"a3aed62f-5abf-4446-9c61-1618025ddc52","Type":"ContainerStarted","Data":"df767bf0422b7438c98c65da508f992fabb053ac408db8c1331f69c54aea2f33"} Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.245338 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-5gdzr" podStartSLOduration=3.245322357 podStartE2EDuration="3.245322357s" podCreationTimestamp="2025-11-22 09:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:14:21.244346804 +0000 UTC m=+657.386849095" watchObservedRunningTime="2025-11-22 09:14:21.245322357 +0000 UTC m=+657.387824649" Nov 22 09:14:21 crc kubenswrapper[4693]: I1122 09:14:21.257893 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6c7b4b5f48-j5p4z" podStartSLOduration=3.257836163 podStartE2EDuration="3.257836163s" podCreationTimestamp="2025-11-22 09:14:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:14:21.255097156 +0000 UTC m=+657.397599447" watchObservedRunningTime="2025-11-22 09:14:21.257836163 +0000 UTC m=+657.400338454" Nov 22 09:14:22 crc kubenswrapper[4693]: I1122 09:14:22.239511 4693 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="" pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:26 crc kubenswrapper[4693]: I1122 09:14:26.277165 4693 generic.go:334] "Generic (PLEG): container finished" podID="22957d7b-20a3-4b2d-af88-d0a93924eec8" containerID="0d83b46263bd8108c41891c0a0f70221dd8d5051009d02533bc5972e7d2986ca" exitCode=0 Nov 22 09:14:26 crc kubenswrapper[4693]: I1122 09:14:26.277310 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerDied","Data":"0d83b46263bd8108c41891c0a0f70221dd8d5051009d02533bc5972e7d2986ca"} Nov 22 09:14:26 crc kubenswrapper[4693]: I1122 09:14:26.280192 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" event={"ID":"d3923021-6731-4289-a3ce-f78f990d6d61","Type":"ContainerStarted","Data":"aecf2a3ae016f415ceb0e58209aedb4f1df997feae3ecd71ec583c053a1764b0"} Nov 22 09:14:26 crc kubenswrapper[4693]: I1122 09:14:26.280324 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:26 crc kubenswrapper[4693]: I1122 09:14:26.306461 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" podStartSLOduration=2.8955389179999997 podStartE2EDuration="8.306447883s" podCreationTimestamp="2025-11-22 09:14:18 +0000 UTC" firstStartedPulling="2025-11-22 09:14:20.14218219 +0000 UTC m=+656.284684482" lastFinishedPulling="2025-11-22 09:14:25.553091155 +0000 UTC m=+661.695593447" observedRunningTime="2025-11-22 09:14:26.303951973 +0000 UTC m=+662.446454265" watchObservedRunningTime="2025-11-22 09:14:26.306447883 +0000 UTC m=+662.448950165" Nov 22 09:14:27 crc kubenswrapper[4693]: I1122 09:14:27.285426 4693 generic.go:334] "Generic (PLEG): container finished" podID="22957d7b-20a3-4b2d-af88-d0a93924eec8" containerID="b677a03ec62c1dba0aef099dc118dc16dc1ee3360f2d9ff34effd26c0631ccfb" exitCode=0 Nov 22 09:14:27 crc kubenswrapper[4693]: I1122 09:14:27.285521 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerDied","Data":"b677a03ec62c1dba0aef099dc118dc16dc1ee3360f2d9ff34effd26c0631ccfb"} Nov 22 09:14:28 crc kubenswrapper[4693]: I1122 09:14:28.291768 4693 generic.go:334] "Generic (PLEG): container finished" podID="22957d7b-20a3-4b2d-af88-d0a93924eec8" containerID="4b87afeb73b9f229d0ce69cea0adea01e4a2af19f50deea3f7a0d9aadcd55bb6" exitCode=0 Nov 22 09:14:28 crc kubenswrapper[4693]: I1122 09:14:28.291815 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerDied","Data":"4b87afeb73b9f229d0ce69cea0adea01e4a2af19f50deea3f7a0d9aadcd55bb6"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.299867 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"c34b61549c25305efb5f81206e9f5c030397d86e228c759f1c2b010263ef3353"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.300416 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"e9b9e20b6e09c9349ff847b4057b360b8ebc04616e1755eebca398ba12f54ba4"} Nov 22 09:14:29 crc 
kubenswrapper[4693]: I1122 09:14:29.300490 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.300551 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"3327519dc2fb6acab38b4ab3ebb875e6d60a0f3d3ea4383c58d9baaf3c2e889a"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.300602 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"a48be3e0f534d66b617760e1da3eb2dd203c68675e32567726dce6e7b9927495"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.300665 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"e0895171a91334140190b9f8da22f00b4d8ecb673597b71f584b900353119a29"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.300724 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-rps7r" event={"ID":"22957d7b-20a3-4b2d-af88-d0a93924eec8","Type":"ContainerStarted","Data":"9c50d6210ed08e0bb0a90c56f2140b831d5966de4cc6bcabbca28d9b63f7361a"} Nov 22 09:14:29 crc kubenswrapper[4693]: I1122 09:14:29.319081 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-rps7r" podStartSLOduration=5.97800747 podStartE2EDuration="11.319067709s" podCreationTimestamp="2025-11-22 09:14:18 +0000 UTC" firstStartedPulling="2025-11-22 09:14:20.200650643 +0000 UTC m=+656.343152933" lastFinishedPulling="2025-11-22 09:14:25.541710881 +0000 UTC m=+661.684213172" observedRunningTime="2025-11-22 09:14:29.317742228 +0000 UTC m=+665.460244509" watchObservedRunningTime="2025-11-22 09:14:29.319067709 +0000 UTC m=+665.461570000" Nov 22 09:14:30 crc kubenswrapper[4693]: I1122 09:14:30.087609 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:30 crc kubenswrapper[4693]: I1122 09:14:30.117058 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:30 crc kubenswrapper[4693]: I1122 09:14:30.746372 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-5gdzr" Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.888152 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.889422 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.891132 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-cckd9" Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.891173 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.892114 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Nov 22 09:14:32 crc kubenswrapper[4693]: I1122 09:14:32.897248 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:33 crc kubenswrapper[4693]: I1122 09:14:33.078675 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hm66s\" (UniqueName: \"kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s\") pod \"openstack-operator-index-66l2k\" (UID: \"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d\") " pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:33 crc kubenswrapper[4693]: I1122 09:14:33.180121 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hm66s\" (UniqueName: \"kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s\") pod \"openstack-operator-index-66l2k\" (UID: \"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d\") " pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:33 crc kubenswrapper[4693]: I1122 09:14:33.196273 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hm66s\" (UniqueName: \"kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s\") pod \"openstack-operator-index-66l2k\" (UID: \"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d\") " pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:33 crc kubenswrapper[4693]: I1122 09:14:33.206223 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:33 crc kubenswrapper[4693]: I1122 09:14:33.542716 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:33 crc kubenswrapper[4693]: W1122 09:14:33.550733 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda4b31bd2_a1c6_4d9f_b7f7_0acbbda0320d.slice/crio-2e9764f507974d27d9428202a0bffb1509097ce05738a6283748f0e506c26c6c WatchSource:0}: Error finding container 2e9764f507974d27d9428202a0bffb1509097ce05738a6283748f0e506c26c6c: Status 404 returned error can't find the container with id 2e9764f507974d27d9428202a0bffb1509097ce05738a6283748f0e506c26c6c Nov 22 09:14:34 crc kubenswrapper[4693]: I1122 09:14:34.328737 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-66l2k" event={"ID":"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d","Type":"ContainerStarted","Data":"2e9764f507974d27d9428202a0bffb1509097ce05738a6283748f0e506c26c6c"} Nov 22 09:14:35 crc kubenswrapper[4693]: I1122 09:14:35.334261 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-66l2k" event={"ID":"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d","Type":"ContainerStarted","Data":"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da"} Nov 22 09:14:35 crc kubenswrapper[4693]: I1122 09:14:35.348674 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-66l2k" podStartSLOduration=2.478040657 podStartE2EDuration="3.348658828s" podCreationTimestamp="2025-11-22 09:14:32 +0000 UTC" firstStartedPulling="2025-11-22 09:14:33.553349634 +0000 UTC m=+669.695851925" lastFinishedPulling="2025-11-22 09:14:34.423967805 +0000 UTC m=+670.566470096" observedRunningTime="2025-11-22 09:14:35.347700957 +0000 UTC m=+671.490203249" watchObservedRunningTime="2025-11-22 09:14:35.348658828 +0000 UTC m=+671.491161119" Nov 22 09:14:36 crc kubenswrapper[4693]: I1122 09:14:36.274722 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:36 crc kubenswrapper[4693]: I1122 09:14:36.878747 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-5zdsq"] Nov 22 09:14:36 crc kubenswrapper[4693]: I1122 09:14:36.879359 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:36 crc kubenswrapper[4693]: I1122 09:14:36.886300 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5zdsq"] Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.017939 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvhl8\" (UniqueName: \"kubernetes.io/projected/932a4450-e8f1-4d96-acf5-1249c1f7cb07-kube-api-access-xvhl8\") pod \"openstack-operator-index-5zdsq\" (UID: \"932a4450-e8f1-4d96-acf5-1249c1f7cb07\") " pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.119398 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvhl8\" (UniqueName: \"kubernetes.io/projected/932a4450-e8f1-4d96-acf5-1249c1f7cb07-kube-api-access-xvhl8\") pod \"openstack-operator-index-5zdsq\" (UID: \"932a4450-e8f1-4d96-acf5-1249c1f7cb07\") " pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.133597 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvhl8\" (UniqueName: \"kubernetes.io/projected/932a4450-e8f1-4d96-acf5-1249c1f7cb07-kube-api-access-xvhl8\") pod \"openstack-operator-index-5zdsq\" (UID: \"932a4450-e8f1-4d96-acf5-1249c1f7cb07\") " pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.192825 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.342815 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-66l2k" podUID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" containerName="registry-server" containerID="cri-o://82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da" gracePeriod=2 Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.538564 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-5zdsq"] Nov 22 09:14:37 crc kubenswrapper[4693]: W1122 09:14:37.544143 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod932a4450_e8f1_4d96_acf5_1249c1f7cb07.slice/crio-b888f42576a8db012acd5df0646ca9a10e38a7c388ec86ce89f4ef3190c9f664 WatchSource:0}: Error finding container b888f42576a8db012acd5df0646ca9a10e38a7c388ec86ce89f4ef3190c9f664: Status 404 returned error can't find the container with id b888f42576a8db012acd5df0646ca9a10e38a7c388ec86ce89f4ef3190c9f664 Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.613218 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.726440 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hm66s\" (UniqueName: \"kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s\") pod \"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d\" (UID: \"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d\") " Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.730392 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s" (OuterVolumeSpecName: "kube-api-access-hm66s") pod "a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" (UID: "a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d"). InnerVolumeSpecName "kube-api-access-hm66s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:14:37 crc kubenswrapper[4693]: I1122 09:14:37.827839 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hm66s\" (UniqueName: \"kubernetes.io/projected/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d-kube-api-access-hm66s\") on node \"crc\" DevicePath \"\"" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.347548 4693 generic.go:334] "Generic (PLEG): container finished" podID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" containerID="82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da" exitCode=0 Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.347587 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-66l2k" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.347608 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-66l2k" event={"ID":"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d","Type":"ContainerDied","Data":"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da"} Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.347655 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-66l2k" event={"ID":"a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d","Type":"ContainerDied","Data":"2e9764f507974d27d9428202a0bffb1509097ce05738a6283748f0e506c26c6c"} Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.347676 4693 scope.go:117] "RemoveContainer" containerID="82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.348750 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zdsq" event={"ID":"932a4450-e8f1-4d96-acf5-1249c1f7cb07","Type":"ContainerStarted","Data":"9e7e652106d9a0265379095bd8d7975f1091d4850944c58ff1fea184a7e9ce59"} Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.348785 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-5zdsq" event={"ID":"932a4450-e8f1-4d96-acf5-1249c1f7cb07","Type":"ContainerStarted","Data":"b888f42576a8db012acd5df0646ca9a10e38a7c388ec86ce89f4ef3190c9f664"} Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.361659 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-5zdsq" podStartSLOduration=1.8487358029999998 podStartE2EDuration="2.361642647s" podCreationTimestamp="2025-11-22 09:14:36 +0000 UTC" firstStartedPulling="2025-11-22 09:14:37.547823771 +0000 UTC m=+673.690326062" 
lastFinishedPulling="2025-11-22 09:14:38.060730615 +0000 UTC m=+674.203232906" observedRunningTime="2025-11-22 09:14:38.359322567 +0000 UTC m=+674.501824858" watchObservedRunningTime="2025-11-22 09:14:38.361642647 +0000 UTC m=+674.504144938" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.362472 4693 scope.go:117] "RemoveContainer" containerID="82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da" Nov 22 09:14:38 crc kubenswrapper[4693]: E1122 09:14:38.363135 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da\": container with ID starting with 82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da not found: ID does not exist" containerID="82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.363169 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da"} err="failed to get container status \"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da\": rpc error: code = NotFound desc = could not find container \"82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da\": container with ID starting with 82acc6cf3da392bb69a6f73676c02fc93c71f8cda011d76a8ef04be4a5f1f0da not found: ID does not exist" Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.369144 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:38 crc kubenswrapper[4693]: I1122 09:14:38.371705 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-66l2k"] Nov 22 09:14:39 crc kubenswrapper[4693]: I1122 09:14:39.784646 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-6998585d5-dlqvh" Nov 22 09:14:39 crc kubenswrapper[4693]: I1122 09:14:39.853495 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6c7b4b5f48-j5p4z" Nov 22 09:14:40 crc kubenswrapper[4693]: I1122 09:14:40.090577 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-rps7r" Nov 22 09:14:40 crc kubenswrapper[4693]: I1122 09:14:40.153576 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" path="/var/lib/kubelet/pods/a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d/volumes" Nov 22 09:14:47 crc kubenswrapper[4693]: I1122 09:14:47.193364 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:47 crc kubenswrapper[4693]: I1122 09:14:47.193835 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:47 crc kubenswrapper[4693]: I1122 09:14:47.213688 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:47 crc kubenswrapper[4693]: I1122 09:14:47.402296 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-5zdsq" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.097710 4693 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6"] Nov 22 09:14:49 crc kubenswrapper[4693]: E1122 09:14:49.097919 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" containerName="registry-server" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.097932 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" containerName="registry-server" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.098017 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4b31bd2-a1c6-4d9f-b7f7-0acbbda0320d" containerName="registry-server" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.098644 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.099985 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-vqwvh" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.104002 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6"] Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.245355 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.245587 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxfv7\" (UniqueName: \"kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.245647 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.346738 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.346781 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxfv7\" (UniqueName: \"kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: 
\"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.346897 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.347172 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.347249 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.361291 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxfv7\" (UniqueName: \"kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7\") pod \"1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.416224 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:49 crc kubenswrapper[4693]: I1122 09:14:49.749220 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6"] Nov 22 09:14:49 crc kubenswrapper[4693]: W1122 09:14:49.753690 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podddd4c776_1709_4074_ac2f_8c9f37aa52f1.slice/crio-da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db WatchSource:0}: Error finding container da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db: Status 404 returned error can't find the container with id da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db Nov 22 09:14:50 crc kubenswrapper[4693]: I1122 09:14:50.398819 4693 generic.go:334] "Generic (PLEG): container finished" podID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerID="45b8cb163d8b95f0b3542074d1fb9652d491e4592ea66844b2a1731785cf4928" exitCode=0 Nov 22 09:14:50 crc kubenswrapper[4693]: I1122 09:14:50.398928 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" event={"ID":"ddd4c776-1709-4074-ac2f-8c9f37aa52f1","Type":"ContainerDied","Data":"45b8cb163d8b95f0b3542074d1fb9652d491e4592ea66844b2a1731785cf4928"} Nov 22 09:14:50 crc kubenswrapper[4693]: I1122 09:14:50.399087 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" event={"ID":"ddd4c776-1709-4074-ac2f-8c9f37aa52f1","Type":"ContainerStarted","Data":"da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db"} Nov 22 09:14:51 crc kubenswrapper[4693]: I1122 09:14:51.410267 4693 generic.go:334] "Generic (PLEG): container finished" podID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerID="3ac724f16d53086a0c275a7c0fb8fed517e6c8da831546ab047f49429d6fb03b" exitCode=0 Nov 22 09:14:51 crc kubenswrapper[4693]: I1122 09:14:51.410334 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" event={"ID":"ddd4c776-1709-4074-ac2f-8c9f37aa52f1","Type":"ContainerDied","Data":"3ac724f16d53086a0c275a7c0fb8fed517e6c8da831546ab047f49429d6fb03b"} Nov 22 09:14:52 crc kubenswrapper[4693]: I1122 09:14:52.417790 4693 generic.go:334] "Generic (PLEG): container finished" podID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerID="d606f22b639fb049b29f7244d43fc0cd2b38d713c182be4c93b911007bf600d1" exitCode=0 Nov 22 09:14:52 crc kubenswrapper[4693]: I1122 09:14:52.417870 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" event={"ID":"ddd4c776-1709-4074-ac2f-8c9f37aa52f1","Type":"ContainerDied","Data":"d606f22b639fb049b29f7244d43fc0cd2b38d713c182be4c93b911007bf600d1"} Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.609994 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.796707 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util\") pod \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.796917 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxfv7\" (UniqueName: \"kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7\") pod \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.796956 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle\") pod \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\" (UID: \"ddd4c776-1709-4074-ac2f-8c9f37aa52f1\") " Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.797630 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle" (OuterVolumeSpecName: "bundle") pod "ddd4c776-1709-4074-ac2f-8c9f37aa52f1" (UID: "ddd4c776-1709-4074-ac2f-8c9f37aa52f1"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.803493 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7" (OuterVolumeSpecName: "kube-api-access-hxfv7") pod "ddd4c776-1709-4074-ac2f-8c9f37aa52f1" (UID: "ddd4c776-1709-4074-ac2f-8c9f37aa52f1"). InnerVolumeSpecName "kube-api-access-hxfv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.806467 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util" (OuterVolumeSpecName: "util") pod "ddd4c776-1709-4074-ac2f-8c9f37aa52f1" (UID: "ddd4c776-1709-4074-ac2f-8c9f37aa52f1"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.898308 4693 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-util\") on node \"crc\" DevicePath \"\"" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.898341 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxfv7\" (UniqueName: \"kubernetes.io/projected/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-kube-api-access-hxfv7\") on node \"crc\" DevicePath \"\"" Nov 22 09:14:53 crc kubenswrapper[4693]: I1122 09:14:53.898353 4693 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/ddd4c776-1709-4074-ac2f-8c9f37aa52f1-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:14:54 crc kubenswrapper[4693]: I1122 09:14:54.429590 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" Nov 22 09:14:54 crc kubenswrapper[4693]: I1122 09:14:54.429542 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6" event={"ID":"ddd4c776-1709-4074-ac2f-8c9f37aa52f1","Type":"ContainerDied","Data":"da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db"} Nov 22 09:14:54 crc kubenswrapper[4693]: I1122 09:14:54.429992 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da0e464408266cf682cb72e0deb0d73e72c653a094505d08e126edcfe64171db" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.786833 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn"] Nov 22 09:14:56 crc kubenswrapper[4693]: E1122 09:14:56.787245 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="pull" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.787257 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="pull" Nov 22 09:14:56 crc kubenswrapper[4693]: E1122 09:14:56.787281 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="util" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.787287 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="util" Nov 22 09:14:56 crc kubenswrapper[4693]: E1122 09:14:56.787296 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="extract" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.787301 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="extract" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.787393 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddd4c776-1709-4074-ac2f-8c9f37aa52f1" containerName="extract" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.787897 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.789863 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-l6d6z" Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.805137 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn"] Nov 22 09:14:56 crc kubenswrapper[4693]: I1122 09:14:56.939036 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pdrdl\" (UniqueName: \"kubernetes.io/projected/13a826c8-8c21-452b-80d9-237f609a62a5-kube-api-access-pdrdl\") pod \"openstack-operator-controller-operator-8486c7f98b-hntsn\" (UID: \"13a826c8-8c21-452b-80d9-237f609a62a5\") " pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:14:57 crc kubenswrapper[4693]: I1122 09:14:57.041463 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pdrdl\" (UniqueName: \"kubernetes.io/projected/13a826c8-8c21-452b-80d9-237f609a62a5-kube-api-access-pdrdl\") pod \"openstack-operator-controller-operator-8486c7f98b-hntsn\" (UID: \"13a826c8-8c21-452b-80d9-237f609a62a5\") " pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:14:57 crc kubenswrapper[4693]: I1122 09:14:57.057032 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pdrdl\" (UniqueName: \"kubernetes.io/projected/13a826c8-8c21-452b-80d9-237f609a62a5-kube-api-access-pdrdl\") pod \"openstack-operator-controller-operator-8486c7f98b-hntsn\" (UID: \"13a826c8-8c21-452b-80d9-237f609a62a5\") " pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:14:57 crc kubenswrapper[4693]: I1122 09:14:57.102221 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:14:57 crc kubenswrapper[4693]: I1122 09:14:57.475587 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn"] Nov 22 09:14:58 crc kubenswrapper[4693]: I1122 09:14:58.449800 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" event={"ID":"13a826c8-8c21-452b-80d9-237f609a62a5","Type":"ContainerStarted","Data":"6926781c79824644edc981e1c307330ca2fe75d10b9604f33499c37fdc55abb1"} Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.166129 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9"] Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.166868 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.169708 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.169907 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.173140 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9"] Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.178291 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.178321 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b7ck5\" (UniqueName: \"kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.178361 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.246995 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.247047 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.280403 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.280457 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b7ck5\" (UniqueName: \"kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.280506 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.281442 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.285948 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.293938 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b7ck5\" (UniqueName: \"kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5\") pod \"collect-profiles-29396715-t8gd9\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:00 crc kubenswrapper[4693]: I1122 09:15:00.484924 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:01 crc kubenswrapper[4693]: I1122 09:15:01.436812 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9"] Nov 22 09:15:01 crc kubenswrapper[4693]: W1122 09:15:01.443611 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0de4e9b7_704c_4f94_92fb_92faf93a2795.slice/crio-4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162 WatchSource:0}: Error finding container 4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162: Status 404 returned error can't find the container with id 4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162 Nov 22 09:15:01 crc kubenswrapper[4693]: I1122 09:15:01.471810 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" event={"ID":"0de4e9b7-704c-4f94-92fb-92faf93a2795","Type":"ContainerStarted","Data":"4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162"} Nov 22 09:15:01 crc kubenswrapper[4693]: I1122 09:15:01.473409 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" event={"ID":"13a826c8-8c21-452b-80d9-237f609a62a5","Type":"ContainerStarted","Data":"3a26975ecd9d42d0d45f32c662f537df17bc5ebaf69718ca389c34be31a86b10"} Nov 22 09:15:02 crc kubenswrapper[4693]: I1122 09:15:02.479769 4693 generic.go:334] "Generic (PLEG): container finished" podID="0de4e9b7-704c-4f94-92fb-92faf93a2795" containerID="9bf570ac5f90d891a2fe8276a6fd8bde7592fe6cfd7535ada7b8f29592db2ff1" exitCode=0 Nov 22 09:15:02 crc kubenswrapper[4693]: I1122 09:15:02.480139 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" event={"ID":"0de4e9b7-704c-4f94-92fb-92faf93a2795","Type":"ContainerDied","Data":"9bf570ac5f90d891a2fe8276a6fd8bde7592fe6cfd7535ada7b8f29592db2ff1"} Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.485709 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" event={"ID":"13a826c8-8c21-452b-80d9-237f609a62a5","Type":"ContainerStarted","Data":"d4c928f5625fced4d0a805bc7428ae672b5ed299fa6e0d1e8833656233d50753"} Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.506773 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" podStartSLOduration=2.17263996 podStartE2EDuration="7.506755488s" podCreationTimestamp="2025-11-22 09:14:56 +0000 UTC" firstStartedPulling="2025-11-22 09:14:57.483562111 +0000 UTC m=+693.626064403" lastFinishedPulling="2025-11-22 09:15:02.81767764 +0000 UTC m=+698.960179931" observedRunningTime="2025-11-22 09:15:03.50531516 +0000 UTC m=+699.647817452" watchObservedRunningTime="2025-11-22 09:15:03.506755488 +0000 UTC m=+699.649257779" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.694752 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.821776 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume\") pod \"0de4e9b7-704c-4f94-92fb-92faf93a2795\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.821833 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b7ck5\" (UniqueName: \"kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5\") pod \"0de4e9b7-704c-4f94-92fb-92faf93a2795\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.821962 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume\") pod \"0de4e9b7-704c-4f94-92fb-92faf93a2795\" (UID: \"0de4e9b7-704c-4f94-92fb-92faf93a2795\") " Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.822726 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume" (OuterVolumeSpecName: "config-volume") pod "0de4e9b7-704c-4f94-92fb-92faf93a2795" (UID: "0de4e9b7-704c-4f94-92fb-92faf93a2795"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.827473 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5" (OuterVolumeSpecName: "kube-api-access-b7ck5") pod "0de4e9b7-704c-4f94-92fb-92faf93a2795" (UID: "0de4e9b7-704c-4f94-92fb-92faf93a2795"). InnerVolumeSpecName "kube-api-access-b7ck5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.827501 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0de4e9b7-704c-4f94-92fb-92faf93a2795" (UID: "0de4e9b7-704c-4f94-92fb-92faf93a2795"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.923999 4693 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0de4e9b7-704c-4f94-92fb-92faf93a2795-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.924030 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b7ck5\" (UniqueName: \"kubernetes.io/projected/0de4e9b7-704c-4f94-92fb-92faf93a2795-kube-api-access-b7ck5\") on node \"crc\" DevicePath \"\"" Nov 22 09:15:03 crc kubenswrapper[4693]: I1122 09:15:03.924041 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0de4e9b7-704c-4f94-92fb-92faf93a2795-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:15:04 crc kubenswrapper[4693]: I1122 09:15:04.490874 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" Nov 22 09:15:04 crc kubenswrapper[4693]: I1122 09:15:04.490872 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9" event={"ID":"0de4e9b7-704c-4f94-92fb-92faf93a2795","Type":"ContainerDied","Data":"4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162"} Nov 22 09:15:04 crc kubenswrapper[4693]: I1122 09:15:04.491533 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4cb3e4cd0422109867e14055aed34fb0c4026d88d933f0adbcbf6f1d99d65162" Nov 22 09:15:04 crc kubenswrapper[4693]: I1122 09:15:04.491553 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:15:07 crc kubenswrapper[4693]: I1122 09:15:07.105207 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-8486c7f98b-hntsn" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.660802 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7"] Nov 22 09:15:23 crc kubenswrapper[4693]: E1122 09:15:23.661372 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0de4e9b7-704c-4f94-92fb-92faf93a2795" containerName="collect-profiles" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.661383 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0de4e9b7-704c-4f94-92fb-92faf93a2795" containerName="collect-profiles" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.661477 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0de4e9b7-704c-4f94-92fb-92faf93a2795" containerName="collect-profiles" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.662001 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.667931 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-2gdww" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.675266 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.676049 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.678387 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zng76" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.693274 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.693991 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.696690 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.699434 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-s68ps" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.700185 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.717949 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.718984 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.729556 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-hkdwp" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.757908 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.758102 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.765883 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.766818 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.775202 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.775933 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.779058 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mhhw6" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.779355 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-jnnjh" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.784399 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.807662 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.829009 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.829939 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.833428 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77tmk\" (UniqueName: \"kubernetes.io/projected/60c907e3-d225-414c-a15c-6f0a6999eb9d-kube-api-access-77tmk\") pod \"glance-operator-controller-manager-8667fbf6f6-xh2cl\" (UID: \"60c907e3-d225-414c-a15c-6f0a6999eb9d\") " pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.833467 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8455f\" (UniqueName: \"kubernetes.io/projected/cadb4705-8655-4f69-b00f-049e64a71b28-kube-api-access-8455f\") pod \"cinder-operator-controller-manager-6d8fd67bf7-lz798\" (UID: \"cadb4705-8655-4f69-b00f-049e64a71b28\") " pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.833511 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldmdn\" (UniqueName: \"kubernetes.io/projected/f1bb1578-9697-4968-b36f-b77d228fafaa-kube-api-access-ldmdn\") pod \"barbican-operator-controller-manager-7768f8c84f-c42f7\" (UID: \"f1bb1578-9697-4968-b36f-b77d228fafaa\") " pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.833537 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jxx9\" (UniqueName: \"kubernetes.io/projected/390da39a-b184-4348-9894-af8f4237aba8-kube-api-access-6jxx9\") pod \"designate-operator-controller-manager-56dfb6b67f-r7t48\" (UID: \"390da39a-b184-4348-9894-af8f4237aba8\") " pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.834019 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.834795 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.836351 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.837100 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.837359 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.837480 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-gfxtn" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.837549 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rz7hm" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.840232 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jcbbf" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.843825 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.847690 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.848579 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.851881 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-wnt6j" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.859592 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.870556 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.881914 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.882754 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.885331 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-fqk45" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.893608 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.900433 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.900457 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.900536 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.901516 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.905087 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-flwxk" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.905383 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-2ztw5" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.913898 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.924889 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.925637 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.930578 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-sxf82" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939309 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jxx9\" (UniqueName: \"kubernetes.io/projected/390da39a-b184-4348-9894-af8f4237aba8-kube-api-access-6jxx9\") pod \"designate-operator-controller-manager-56dfb6b67f-r7t48\" (UID: \"390da39a-b184-4348-9894-af8f4237aba8\") " pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939361 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjpq2\" (UniqueName: \"kubernetes.io/projected/0d573b45-216b-4869-96f6-c460bb7ff10f-kube-api-access-rjpq2\") pod \"heat-operator-controller-manager-bf4c6585d-vts55\" (UID: \"0d573b45-216b-4869-96f6-c460bb7ff10f\") " pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939382 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vnbj9\" (UniqueName: \"kubernetes.io/projected/703d454c-2336-4941-8fe9-5b717f57423f-kube-api-access-vnbj9\") pod \"keystone-operator-controller-manager-7879fb76fd-5xnb4\" (UID: \"703d454c-2336-4941-8fe9-5b717f57423f\") " pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939400 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btzwk\" (UniqueName: \"kubernetes.io/projected/6bad4eeb-497a-4459-af08-c6f1db9ee8bf-kube-api-access-btzwk\") pod \"octavia-operator-controller-manager-6fdc856c5d-q68sw\" (UID: \"6bad4eeb-497a-4459-af08-c6f1db9ee8bf\") " pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939429 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcfjd\" (UniqueName: \"kubernetes.io/projected/4b168504-e6a6-48c2-a8af-dc6a44c77e59-kube-api-access-vcfjd\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939448 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4wddj\" (UniqueName: \"kubernetes.io/projected/23476d93-a604-4bc2-9e83-5c59e574436c-kube-api-access-4wddj\") pod \"ironic-operator-controller-manager-5c75d7c94b-92vqr\" (UID: \"23476d93-a604-4bc2-9e83-5c59e574436c\") " pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939468 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77tmk\" (UniqueName: \"kubernetes.io/projected/60c907e3-d225-414c-a15c-6f0a6999eb9d-kube-api-access-77tmk\") pod \"glance-operator-controller-manager-8667fbf6f6-xh2cl\" (UID: \"60c907e3-d225-414c-a15c-6f0a6999eb9d\") " 
pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939486 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8455f\" (UniqueName: \"kubernetes.io/projected/cadb4705-8655-4f69-b00f-049e64a71b28-kube-api-access-8455f\") pod \"cinder-operator-controller-manager-6d8fd67bf7-lz798\" (UID: \"cadb4705-8655-4f69-b00f-049e64a71b28\") " pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939509 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8478\" (UniqueName: \"kubernetes.io/projected/9ea19f26-5477-43e9-84a2-1b8cf72f4f81-kube-api-access-j8478\") pod \"manila-operator-controller-manager-7bb88cb858-gm5p8\" (UID: \"9ea19f26-5477-43e9-84a2-1b8cf72f4f81\") " pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939525 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcjf9\" (UniqueName: \"kubernetes.io/projected/750a5fd2-0554-4a9d-a16b-9e82cb56694f-kube-api-access-jcjf9\") pod \"nova-operator-controller-manager-86d796d84d-k8mcb\" (UID: \"750a5fd2-0554-4a9d-a16b-9e82cb56694f\") " pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939540 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4djt4\" (UniqueName: \"kubernetes.io/projected/9acf7acc-6712-4fbc-ab1c-14a9e1076ab8-kube-api-access-4djt4\") pod \"mariadb-operator-controller-manager-6f8c5b86cb-t6hdc\" (UID: \"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8\") " pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939556 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66str\" (UniqueName: \"kubernetes.io/projected/ed8b2ca0-7928-41f6-8e30-787058fa0808-kube-api-access-66str\") pod \"neutron-operator-controller-manager-66b7d6f598-wd7tg\" (UID: \"ed8b2ca0-7928-41f6-8e30-787058fa0808\") " pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939571 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b168504-e6a6-48c2-a8af-dc6a44c77e59-cert\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939591 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bg8zw\" (UniqueName: \"kubernetes.io/projected/e1cfaae9-e5b8-4826-9e34-6fce5657c237-kube-api-access-bg8zw\") pod \"horizon-operator-controller-manager-5d86b44686-j6jzp\" (UID: \"e1cfaae9-e5b8-4826-9e34-6fce5657c237\") " pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.939610 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldmdn\" (UniqueName: 
\"kubernetes.io/projected/f1bb1578-9697-4968-b36f-b77d228fafaa-kube-api-access-ldmdn\") pod \"barbican-operator-controller-manager-7768f8c84f-c42f7\" (UID: \"f1bb1578-9697-4968-b36f-b77d228fafaa\") " pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.949038 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.986359 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb"] Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.996193 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jxx9\" (UniqueName: \"kubernetes.io/projected/390da39a-b184-4348-9894-af8f4237aba8-kube-api-access-6jxx9\") pod \"designate-operator-controller-manager-56dfb6b67f-r7t48\" (UID: \"390da39a-b184-4348-9894-af8f4237aba8\") " pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.998081 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8455f\" (UniqueName: \"kubernetes.io/projected/cadb4705-8655-4f69-b00f-049e64a71b28-kube-api-access-8455f\") pod \"cinder-operator-controller-manager-6d8fd67bf7-lz798\" (UID: \"cadb4705-8655-4f69-b00f-049e64a71b28\") " pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:23 crc kubenswrapper[4693]: I1122 09:15:23.999970 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldmdn\" (UniqueName: \"kubernetes.io/projected/f1bb1578-9697-4968-b36f-b77d228fafaa-kube-api-access-ldmdn\") pod \"barbican-operator-controller-manager-7768f8c84f-c42f7\" (UID: \"f1bb1578-9697-4968-b36f-b77d228fafaa\") " pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.004917 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.008794 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77tmk\" (UniqueName: \"kubernetes.io/projected/60c907e3-d225-414c-a15c-6f0a6999eb9d-kube-api-access-77tmk\") pod \"glance-operator-controller-manager-8667fbf6f6-xh2cl\" (UID: \"60c907e3-d225-414c-a15c-6f0a6999eb9d\") " pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.009804 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.015491 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.016641 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.022877 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.025355 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.030270 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-jn8tp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.030882 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.033940 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-chj8t" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.034273 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.039782 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040482 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjpq2\" (UniqueName: \"kubernetes.io/projected/0d573b45-216b-4869-96f6-c460bb7ff10f-kube-api-access-rjpq2\") pod \"heat-operator-controller-manager-bf4c6585d-vts55\" (UID: \"0d573b45-216b-4869-96f6-c460bb7ff10f\") " pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040510 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vnbj9\" (UniqueName: \"kubernetes.io/projected/703d454c-2336-4941-8fe9-5b717f57423f-kube-api-access-vnbj9\") pod \"keystone-operator-controller-manager-7879fb76fd-5xnb4\" (UID: \"703d454c-2336-4941-8fe9-5b717f57423f\") " pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040530 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btzwk\" (UniqueName: \"kubernetes.io/projected/6bad4eeb-497a-4459-af08-c6f1db9ee8bf-kube-api-access-btzwk\") pod \"octavia-operator-controller-manager-6fdc856c5d-q68sw\" (UID: \"6bad4eeb-497a-4459-af08-c6f1db9ee8bf\") " pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040556 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcfjd\" (UniqueName: \"kubernetes.io/projected/4b168504-e6a6-48c2-a8af-dc6a44c77e59-kube-api-access-vcfjd\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040576 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-4wddj\" (UniqueName: \"kubernetes.io/projected/23476d93-a604-4bc2-9e83-5c59e574436c-kube-api-access-4wddj\") pod \"ironic-operator-controller-manager-5c75d7c94b-92vqr\" (UID: \"23476d93-a604-4bc2-9e83-5c59e574436c\") " pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040593 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040619 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m4dl8\" (UniqueName: \"kubernetes.io/projected/bbbb8d7a-ee06-4f2e-9982-97b6ee86801d-kube-api-access-m4dl8\") pod \"ovn-operator-controller-manager-5bdf4f7f7f-h6k74\" (UID: \"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d\") " pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040640 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8478\" (UniqueName: \"kubernetes.io/projected/9ea19f26-5477-43e9-84a2-1b8cf72f4f81-kube-api-access-j8478\") pod \"manila-operator-controller-manager-7bb88cb858-gm5p8\" (UID: \"9ea19f26-5477-43e9-84a2-1b8cf72f4f81\") " pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040657 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcjf9\" (UniqueName: \"kubernetes.io/projected/750a5fd2-0554-4a9d-a16b-9e82cb56694f-kube-api-access-jcjf9\") pod \"nova-operator-controller-manager-86d796d84d-k8mcb\" (UID: \"750a5fd2-0554-4a9d-a16b-9e82cb56694f\") " pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040672 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4djt4\" (UniqueName: \"kubernetes.io/projected/9acf7acc-6712-4fbc-ab1c-14a9e1076ab8-kube-api-access-4djt4\") pod \"mariadb-operator-controller-manager-6f8c5b86cb-t6hdc\" (UID: \"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8\") " pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040689 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b168504-e6a6-48c2-a8af-dc6a44c77e59-cert\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040704 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-66str\" (UniqueName: \"kubernetes.io/projected/ed8b2ca0-7928-41f6-8e30-787058fa0808-kube-api-access-66str\") pod \"neutron-operator-controller-manager-66b7d6f598-wd7tg\" (UID: \"ed8b2ca0-7928-41f6-8e30-787058fa0808\") " pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 
09:15:24.040723 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg8zw\" (UniqueName: \"kubernetes.io/projected/e1cfaae9-e5b8-4826-9e34-6fce5657c237-kube-api-access-bg8zw\") pod \"horizon-operator-controller-manager-5d86b44686-j6jzp\" (UID: \"e1cfaae9-e5b8-4826-9e34-6fce5657c237\") " pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.040738 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lr7ct\" (UniqueName: \"kubernetes.io/projected/a100268d-89c1-412b-82a6-843711bcb44b-kube-api-access-lr7ct\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.045174 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.052486 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/4b168504-e6a6-48c2-a8af-dc6a44c77e59-cert\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.057692 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.058564 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.059228 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.059308 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.059975 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.062106 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pkgs6" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.067117 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4wddj\" (UniqueName: \"kubernetes.io/projected/23476d93-a604-4bc2-9e83-5c59e574436c-kube-api-access-4wddj\") pod \"ironic-operator-controller-manager-5c75d7c94b-92vqr\" (UID: \"23476d93-a604-4bc2-9e83-5c59e574436c\") " pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.070390 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66str\" (UniqueName: \"kubernetes.io/projected/ed8b2ca0-7928-41f6-8e30-787058fa0808-kube-api-access-66str\") pod \"neutron-operator-controller-manager-66b7d6f598-wd7tg\" (UID: \"ed8b2ca0-7928-41f6-8e30-787058fa0808\") " pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.071575 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.072865 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-9d4tq" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.073990 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btzwk\" (UniqueName: \"kubernetes.io/projected/6bad4eeb-497a-4459-af08-c6f1db9ee8bf-kube-api-access-btzwk\") pod \"octavia-operator-controller-manager-6fdc856c5d-q68sw\" (UID: \"6bad4eeb-497a-4459-af08-c6f1db9ee8bf\") " pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.075331 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8478\" (UniqueName: \"kubernetes.io/projected/9ea19f26-5477-43e9-84a2-1b8cf72f4f81-kube-api-access-j8478\") pod \"manila-operator-controller-manager-7bb88cb858-gm5p8\" (UID: \"9ea19f26-5477-43e9-84a2-1b8cf72f4f81\") " pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.075580 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vnbj9\" (UniqueName: \"kubernetes.io/projected/703d454c-2336-4941-8fe9-5b717f57423f-kube-api-access-vnbj9\") pod \"keystone-operator-controller-manager-7879fb76fd-5xnb4\" (UID: \"703d454c-2336-4941-8fe9-5b717f57423f\") " pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.075706 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcfjd\" (UniqueName: \"kubernetes.io/projected/4b168504-e6a6-48c2-a8af-dc6a44c77e59-kube-api-access-vcfjd\") pod \"infra-operator-controller-manager-769d9c7585-9g6fk\" (UID: \"4b168504-e6a6-48c2-a8af-dc6a44c77e59\") " pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.080183 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-4djt4\" (UniqueName: \"kubernetes.io/projected/9acf7acc-6712-4fbc-ab1c-14a9e1076ab8-kube-api-access-4djt4\") pod \"mariadb-operator-controller-manager-6f8c5b86cb-t6hdc\" (UID: \"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8\") " pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.081309 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcjf9\" (UniqueName: \"kubernetes.io/projected/750a5fd2-0554-4a9d-a16b-9e82cb56694f-kube-api-access-jcjf9\") pod \"nova-operator-controller-manager-86d796d84d-k8mcb\" (UID: \"750a5fd2-0554-4a9d-a16b-9e82cb56694f\") " pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.092373 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg8zw\" (UniqueName: \"kubernetes.io/projected/e1cfaae9-e5b8-4826-9e34-6fce5657c237-kube-api-access-bg8zw\") pod \"horizon-operator-controller-manager-5d86b44686-j6jzp\" (UID: \"e1cfaae9-e5b8-4826-9e34-6fce5657c237\") " pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.094614 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.097344 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjpq2\" (UniqueName: \"kubernetes.io/projected/0d573b45-216b-4869-96f6-c460bb7ff10f-kube-api-access-rjpq2\") pod \"heat-operator-controller-manager-bf4c6585d-vts55\" (UID: \"0d573b45-216b-4869-96f6-c460bb7ff10f\") " pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.102895 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.106566 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.108173 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.108721 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-trthd" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142506 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2xjj8\" (UniqueName: \"kubernetes.io/projected/bbb5f842-47da-40fc-a082-50323f1f10f8-kube-api-access-2xjj8\") pod \"placement-operator-controller-manager-6dc664666c-mqzzr\" (UID: \"bbb5f842-47da-40fc-a082-50323f1f10f8\") " pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142567 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142643 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sk8gf\" (UniqueName: \"kubernetes.io/projected/89938523-c1a9-4f6e-aebb-396a3cd509c6-kube-api-access-sk8gf\") pod \"swift-operator-controller-manager-799cb6ffd6-vnrh2\" (UID: \"89938523-c1a9-4f6e-aebb-396a3cd509c6\") " pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142694 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m4dl8\" (UniqueName: \"kubernetes.io/projected/bbbb8d7a-ee06-4f2e-9982-97b6ee86801d-kube-api-access-m4dl8\") pod \"ovn-operator-controller-manager-5bdf4f7f7f-h6k74\" (UID: \"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d\") " pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142750 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lr7ct\" (UniqueName: \"kubernetes.io/projected/a100268d-89c1-412b-82a6-843711bcb44b-kube-api-access-lr7ct\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.142791 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvmlh\" (UniqueName: \"kubernetes.io/projected/1b962230-a413-4f39-a8c8-bed04c898724-kube-api-access-mvmlh\") pod \"telemetry-operator-controller-manager-7798859c74-kz8xk\" (UID: \"1b962230-a413-4f39-a8c8-bed04c898724\") " pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.147474 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Nov 22 
09:15:24 crc kubenswrapper[4693]: E1122 09:15:24.154936 4693 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 09:15:24 crc kubenswrapper[4693]: E1122 09:15:24.154986 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert podName:a100268d-89c1-412b-82a6-843711bcb44b nodeName:}" failed. No retries permitted until 2025-11-22 09:15:24.65497073 +0000 UTC m=+720.797473021 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert") pod "openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" (UID: "a100268d-89c1-412b-82a6-843711bcb44b") : secret "openstack-baremetal-operator-webhook-server-cert" not found Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.159740 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-gfxtn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.162462 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lr7ct\" (UniqueName: \"kubernetes.io/projected/a100268d-89c1-412b-82a6-843711bcb44b-kube-api-access-lr7ct\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.162907 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m4dl8\" (UniqueName: \"kubernetes.io/projected/bbbb8d7a-ee06-4f2e-9982-97b6ee86801d-kube-api-access-m4dl8\") pod \"ovn-operator-controller-manager-5bdf4f7f7f-h6k74\" (UID: \"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d\") " pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.162985 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.167092 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.169129 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.169642 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rz7hm" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.171191 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-5jh44" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.174466 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.178640 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.185378 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-jcbbf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.194762 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.196216 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-wnt6j" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.207945 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.216390 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-fqk45" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.226014 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.242327 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-flwxk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.244182 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2xjj8\" (UniqueName: \"kubernetes.io/projected/bbb5f842-47da-40fc-a082-50323f1f10f8-kube-api-access-2xjj8\") pod \"placement-operator-controller-manager-6dc664666c-mqzzr\" (UID: \"bbb5f842-47da-40fc-a082-50323f1f10f8\") " pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.244239 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sk8gf\" (UniqueName: \"kubernetes.io/projected/89938523-c1a9-4f6e-aebb-396a3cd509c6-kube-api-access-sk8gf\") pod \"swift-operator-controller-manager-799cb6ffd6-vnrh2\" (UID: \"89938523-c1a9-4f6e-aebb-396a3cd509c6\") " pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.244302 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvmlh\" (UniqueName: \"kubernetes.io/projected/1b962230-a413-4f39-a8c8-bed04c898724-kube-api-access-mvmlh\") pod \"telemetry-operator-controller-manager-7798859c74-kz8xk\" (UID: \"1b962230-a413-4f39-a8c8-bed04c898724\") " pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.253372 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.262104 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-2ztw5" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.278882 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2xjj8\" (UniqueName: \"kubernetes.io/projected/bbb5f842-47da-40fc-a082-50323f1f10f8-kube-api-access-2xjj8\") pod \"placement-operator-controller-manager-6dc664666c-mqzzr\" (UID: \"bbb5f842-47da-40fc-a082-50323f1f10f8\") " pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.281030 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.281717 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvmlh\" (UniqueName: \"kubernetes.io/projected/1b962230-a413-4f39-a8c8-bed04c898724-kube-api-access-mvmlh\") pod \"telemetry-operator-controller-manager-7798859c74-kz8xk\" (UID: \"1b962230-a413-4f39-a8c8-bed04c898724\") " pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.282234 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sk8gf\" (UniqueName: \"kubernetes.io/projected/89938523-c1a9-4f6e-aebb-396a3cd509c6-kube-api-access-sk8gf\") pod \"swift-operator-controller-manager-799cb6ffd6-vnrh2\" (UID: \"89938523-c1a9-4f6e-aebb-396a3cd509c6\") " pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.283671 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-sxf82" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.284826 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.285863 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-2gdww" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.287902 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-zng76" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.290775 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.291424 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.291775 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.293192 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-zbbl5" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.295366 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.297259 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.345378 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2bhb\" (UniqueName: \"kubernetes.io/projected/3baa8fe0-513d-4a42-a83a-4cc6fbf0e938-kube-api-access-j2bhb\") pod \"test-operator-controller-manager-8464cf66df-cl5qf\" (UID: \"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938\") " pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.376494 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.377420 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.380085 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.380299 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-xnqmz" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.380406 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-mhhw6" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.388702 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.391064 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.400072 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-chj8t" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.409039 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.410290 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-pkgs6" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.416624 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.416964 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-9d4tq" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.426055 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.429692 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-trthd" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.438900 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.449329 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2bhb\" (UniqueName: \"kubernetes.io/projected/3baa8fe0-513d-4a42-a83a-4cc6fbf0e938-kube-api-access-j2bhb\") pod \"test-operator-controller-manager-8464cf66df-cl5qf\" (UID: \"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938\") " pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.449419 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssffx\" (UniqueName: \"kubernetes.io/projected/1f579f14-5558-45aa-9fa5-da9ee0ccac02-kube-api-access-ssffx\") pod \"watcher-operator-controller-manager-7cd4fb6f79-298gp\" (UID: \"1f579f14-5558-45aa-9fa5-da9ee0ccac02\") " pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.466286 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.467311 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.471061 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-vml2t" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.473258 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2bhb\" (UniqueName: \"kubernetes.io/projected/3baa8fe0-513d-4a42-a83a-4cc6fbf0e938-kube-api-access-j2bhb\") pod \"test-operator-controller-manager-8464cf66df-cl5qf\" (UID: \"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938\") " pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.475544 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.484354 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.484614 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.552788 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.552853 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d45s\" (UniqueName: \"kubernetes.io/projected/261611c7-97d3-444c-bba5-e06e1593a5e4-kube-api-access-6d45s\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.552881 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssffx\" (UniqueName: \"kubernetes.io/projected/1f579f14-5558-45aa-9fa5-da9ee0ccac02-kube-api-access-ssffx\") pod \"watcher-operator-controller-manager-7cd4fb6f79-298gp\" (UID: \"1f579f14-5558-45aa-9fa5-da9ee0ccac02\") " pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.582951 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssffx\" (UniqueName: \"kubernetes.io/projected/1f579f14-5558-45aa-9fa5-da9ee0ccac02-kube-api-access-ssffx\") pod \"watcher-operator-controller-manager-7cd4fb6f79-298gp\" (UID: \"1f579f14-5558-45aa-9fa5-da9ee0ccac02\") " pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.589765 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" 
event={"ID":"390da39a-b184-4348-9894-af8f4237aba8","Type":"ContainerStarted","Data":"fde42fad3372c267e8d271356cec26d061625d0e927262f70f627ffa3cd4175e"} Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.616668 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.633143 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.654712 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8fx48\" (UniqueName: \"kubernetes.io/projected/d7bbf03f-0d49-4144-8ea9-0303a2e5c86e-kube-api-access-8fx48\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn\" (UID: \"d7bbf03f-0d49-4144-8ea9-0303a2e5c86e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.654829 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.654870 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d45s\" (UniqueName: \"kubernetes.io/projected/261611c7-97d3-444c-bba5-e06e1593a5e4-kube-api-access-6d45s\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: E1122 09:15:24.655091 4693 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 22 09:15:24 crc kubenswrapper[4693]: E1122 09:15:24.655176 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert podName:261611c7-97d3-444c-bba5-e06e1593a5e4 nodeName:}" failed. No retries permitted until 2025-11-22 09:15:25.155159765 +0000 UTC m=+721.297662055 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert") pod "openstack-operator-controller-manager-6cb9dc54f8-qv89g" (UID: "261611c7-97d3-444c-bba5-e06e1593a5e4") : secret "webhook-server-cert" not found Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.658368 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp"] Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.671425 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d45s\" (UniqueName: \"kubernetes.io/projected/261611c7-97d3-444c-bba5-e06e1593a5e4-kube-api-access-6d45s\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:24 crc kubenswrapper[4693]: W1122 09:15:24.679296 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode1cfaae9_e5b8_4826_9e34_6fce5657c237.slice/crio-e4db8c9ac9649f4a552b1be8e06535a51d013966e1adb11d077e56dc1df834ea WatchSource:0}: Error finding container e4db8c9ac9649f4a552b1be8e06535a51d013966e1adb11d077e56dc1df834ea: Status 404 returned error can't find the container with id e4db8c9ac9649f4a552b1be8e06535a51d013966e1adb11d077e56dc1df834ea Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.756116 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8fx48\" (UniqueName: \"kubernetes.io/projected/d7bbf03f-0d49-4144-8ea9-0303a2e5c86e-kube-api-access-8fx48\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn\" (UID: \"d7bbf03f-0d49-4144-8ea9-0303a2e5c86e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.756188 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.761090 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a100268d-89c1-412b-82a6-843711bcb44b-cert\") pod \"openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4\" (UID: \"a100268d-89c1-412b-82a6-843711bcb44b\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.770002 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8fx48\" (UniqueName: \"kubernetes.io/projected/d7bbf03f-0d49-4144-8ea9-0303a2e5c86e-kube-api-access-8fx48\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn\" (UID: \"d7bbf03f-0d49-4144-8ea9-0303a2e5c86e\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.788126 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.987647 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-jn8tp" Nov 22 09:15:24 crc kubenswrapper[4693]: I1122 09:15:24.997492 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.033153 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.042074 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.048691 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.054527 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.062688 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg"] Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.076685 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poded8b2ca0_7928_41f6_8e30_787058fa0808.slice/crio-de16e8704fae1116182f3ddb86f118776911c99054c6eeef2c5500fc2c7089dc WatchSource:0}: Error finding container de16e8704fae1116182f3ddb86f118776911c99054c6eeef2c5500fc2c7089dc: Status 404 returned error can't find the container with id de16e8704fae1116182f3ddb86f118776911c99054c6eeef2c5500fc2c7089dc Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.086753 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr"] Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.096694 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod23476d93_a604_4bc2_9e83_5c59e574436c.slice/crio-46c110cb6e6bbcb8191edf6f84d1bc52433a9528d405f80ab2e12a0a6b20dc61 WatchSource:0}: Error finding container 46c110cb6e6bbcb8191edf6f84d1bc52433a9528d405f80ab2e12a0a6b20dc61: Status 404 returned error can't find the container with id 46c110cb6e6bbcb8191edf6f84d1bc52433a9528d405f80ab2e12a0a6b20dc61 Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.101897 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb"] Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.112027 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod750a5fd2_0554_4a9d_a16b_9e82cb56694f.slice/crio-46f34236b137b356dee73cc70ffe0df1c49b2433196d4ea7f6ff6480750ed375 WatchSource:0}: Error finding container 46f34236b137b356dee73cc70ffe0df1c49b2433196d4ea7f6ff6480750ed375: Status 404 returned error can't find the container with id 46f34236b137b356dee73cc70ffe0df1c49b2433196d4ea7f6ff6480750ed375 Nov 22 
09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.161101 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.161344 4693 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.161460 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert podName:261611c7-97d3-444c-bba5-e06e1593a5e4 nodeName:}" failed. No retries permitted until 2025-11-22 09:15:26.161444105 +0000 UTC m=+722.303946396 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert") pod "openstack-operator-controller-manager-6cb9dc54f8-qv89g" (UID: "261611c7-97d3-444c-bba5-e06e1593a5e4") : secret "webhook-server-cert" not found Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.343437 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.345878 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.348988 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.356012 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.359103 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.364517 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55"] Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.366827 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89938523_c1a9_4f6e_aebb_396a3cd509c6.slice/crio-cd9cef9437fe9b150caeb4f54de804882d29dd9ea6f645cb352400139efa9a67 WatchSource:0}: Error finding container cd9cef9437fe9b150caeb4f54de804882d29dd9ea6f645cb352400139efa9a67: Status 404 returned error can't find the container with id cd9cef9437fe9b150caeb4f54de804882d29dd9ea6f645cb352400139efa9a67 Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.370990 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.373647 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw"] Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.376359 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74"] Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.378161 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-j2bhb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-8464cf66df-cl5qf_openstack-operators(3baa8fe0-513d-4a42-a83a-4cc6fbf0e938): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.379063 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf"] Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.379473 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8455f,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-operator-controller-manager-6d8fd67bf7-lz798_openstack-operators(cadb4705-8655-4f69-b00f-049e64a71b28): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.381515 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn"] Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.382079 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-m4dl8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-5bdf4f7f7f-h6k74_openstack-operators(bbbb8d7a-ee06-4f2e-9982-97b6ee86801d): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.385123 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d573b45_216b_4869_96f6_c460bb7ff10f.slice/crio-b6f5f76aa2ac71486469d93d427f85e55f93e3b7ce24c64440daba01c747c88a WatchSource:0}: Error finding container b6f5f76aa2ac71486469d93d427f85e55f93e3b7ce24c64440daba01c747c88a: Status 404 returned error can't find the container with id b6f5f76aa2ac71486469d93d427f85e55f93e3b7ce24c64440daba01c747c88a Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.389907 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-rjpq2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-bf4c6585d-vts55_openstack-operators(0d573b45-216b-4869-96f6-c460bb7ff10f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.390227 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd7bbf03f_0d49_4144_8ea9_0303a2e5c86e.slice/crio-616c58f7cbad038fe4e0aa6978afce0bdd432330ad079514aaa448eaf1b86f45 WatchSource:0}: Error finding container 616c58f7cbad038fe4e0aa6978afce0bdd432330ad079514aaa448eaf1b86f45: Status 404 returned error can't find the container with id 616c58f7cbad038fe4e0aa6978afce0bdd432330ad079514aaa448eaf1b86f45 Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.391274 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbbb5f842_47da_40fc_a082_50323f1f10f8.slice/crio-751e3f9425131cc42f03c8f0a0c927f47ae588ed42cb8a7ae43f1f79adcdfaf4 WatchSource:0}: Error finding container 751e3f9425131cc42f03c8f0a0c927f47ae588ed42cb8a7ae43f1f79adcdfaf4: Status 404 returned error can't find the container with id 751e3f9425131cc42f03c8f0a0c927f47ae588ed42cb8a7ae43f1f79adcdfaf4 Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.392124 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-8fx48,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn_openstack-operators(d7bbf03f-0d49-4144-8ea9-0303a2e5c86e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.393194 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" podUID="d7bbf03f-0d49-4144-8ea9-0303a2e5c86e" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.393256 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4"] Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.393241 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2xjj8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-operator-controller-manager-6dc664666c-mqzzr_openstack-operators(bbb5f842-47da-40fc-a082-50323f1f10f8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: W1122 09:15:25.402033 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6bad4eeb_497a_4459_af08_c6f1db9ee8bf.slice/crio-e41b1728b2120759ee9122d210f2e38f638ceac362683ebcd8446d609ba1d355 WatchSource:0}: Error finding container e41b1728b2120759ee9122d210f2e38f638ceac362683ebcd8446d609ba1d355: Status 404 returned error can't find the container with id e41b1728b2120759ee9122d210f2e38f638ceac362683ebcd8446d609ba1d355 Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.410625 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-btzwk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-6fdc856c5d-q68sw_openstack-operators(6bad4eeb-497a-4459-af08-c6f1db9ee8bf): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.421449 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent@sha256:7dbadf7b98f2f305f9f1382f55a084c8ca404f4263f76b28e56bd0dc437e2192,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner@sha256:0473ff9eec0da231e2d0a10bf1abbe1dfa1a0f95b8f619e3a07605386951449a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api@sha256:c8101c77a82eae4407e41e1fd766dfc6e1b7f9ed1679e3efb6f91ff97a1557b2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator@sha256:eb9743b21bbadca6f7cb9ac4fc46b5d58c51c674073c7e1121f4474a71304071,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener@sha256:3d81f839b98c2e2a5bf0da79f2f9a92dff7d0a3c5a830b0e95c89dad8cf98a6a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier@sha256:d19ac99249b47dd8ea16cd6aaa5756346aa8a2f119ee50819c15c5366efb417d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24@sha256:8536169e5537fe6c330eba814248abdcf39cdd8f7e7336034d74e6fda9544050,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api@sha256:4c93a5cccb9971e24f05daf93b3aa11ba71752bc3469a1a1a2c4906f92f69645,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener@sha256:4f1fa337760e82bfd67cdd142a97c121146dd7e621daac161940dd5e4ddb80dc,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IM
AGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker@sha256:3613b345d5baed98effd906f8b0242d863e14c97078ea473ef01fe1b0afc46f3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute@sha256:9f9f367ed4c85efb16c3a74a4bb707ff0db271d7bc5abc70a71e984b55f43003,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi@sha256:b73ad22b4955b06d584bce81742556d8c0c7828c495494f8ea7c99391c61b70f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter@sha256:7211a617ec657701ca819aa0ba28e1d5750f5bf2c1391b755cc4a48cc360b0fa,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification@sha256:aa1d3aaf6b394621ed4089a98e0a82b763f467e8b5c5db772f9fdf99fc86e333,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core@sha256:09b5017c95d7697e66b9c64846bc48ef5826a009cba89b956ec54561e5f4a2d1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup@sha256:d6661053141b6df421288a7c9968a155ab82e478c1d75ab41f2cebe2f0ca02d2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler@sha256:ce2d63258cb4e7d0d1c07234de6889c5434464190906798019311a1c7cf6387f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume@sha256:0485ef9e5b4437f7cd2ba54034a87722ce4669ee86b3773c6b0c037ed8000e91,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_API_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-api@sha256:962c004551d0503779364b767b9bf0cecdf78dbba8809b2ca8b073f58e1f4e5d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CLOUDKITTY_PROC_IMAGE_URL_DEFAULT,Value:quay.rdoproject.org/podified-master-centos10/openstack-cloudkitty-processor@sha256:0ebf4c465fb6cc7dad9e6cb2da0ff54874c9acbcb40d62234a629ec2c12cdd62,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api@sha256:ff0c553ceeb2e0f44b010e37dc6d0db8a251797b88e56468b7cf7f05253e4232,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9@sha256:624f553f073af7493d34828b074adc9981cce403edd8e71482c7307008479fd9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central@sha256:e3874936a518c8560339db8f840fc5461885819f6050b5de8d3ab9199bea5094,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns@sha256:1cea25f1d2a45affc80c46fb
9d427749d3f06b61590ac6070a2910e3ec8a4e5d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer@sha256:e36d5b9a65194f12f7b01c6422ba3ed52a687fd1695fbb21f4986c67d9f9317f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound@sha256:8b21bec527d54cd766e277889df6bcccd2baeaa946274606b986c0c3b7ca689f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker@sha256:45aceca77f8fcf61127f0da650bdfdf11ede9b0944c78b63fab819d03283f96b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr@sha256:709ac58998927dd61786821ae1e63343fd97ccf5763aac5edb4583eea9401d22,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid@sha256:867d4ef7c21f75e6030a685b5762ab4d84b671316ed6b98d75200076e93342cd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler@sha256:581b65b646301e0fcb07582150ba63438f1353a85bf9acf1eb2acb4ce71c58bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron@sha256:2b90da93550b99d2fcfa95bd819f3363aa68346a416f8dc7baac3e9c5f487761,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd@sha256:6f86db36d668348be8c5b46dcda8b1fa23d34bfdc07164fbcbe7a6327fb4de24,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent@sha256:8cde52cef8795d1c91983b100d86541c7718160ec260fe0f97b96add4c2c8ee8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn@sha256:a9583cb3baf440d2358ef041373833afbeae60da8159dd031502379901141620,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent@sha256:835ebed082fe1c45bd799d1d5357595ce63efeb05ca876f26b08443facb9c164,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent@sha256:011d682241db724bc40736c9b54d2ea450ea7e6be095b1ff5fa28c8007466775,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter@sha256:39c642b2b337e38c18e80266fb14383754178202f40103646337722a594d984c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent@sha256:2025da90cff8f563deb08bee71efe16d4078edc2a767b2e225cca5c77f1aa2f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter@sha256:d339ba049bbd1adccb795962bf163f5b22fd84dea865d88b9eb525e46247d6bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:26bd7b0bd6070856aefef6fe754c547d55c056396ea30d879d34c2d49b5a1d29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api@sha256:ff46cd5e0
e13d105c4629e78c2734a50835f06b6a1e31da9e0462981d10c4be3,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn@sha256:5b4fd0c2b76fa5539f74687b11c5882d77bd31352452322b37ff51fa18f12a61,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine@sha256:5e03376bd895346dc8f627ca15ded942526ed8b5e92872f453ce272e694d18d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon@sha256:65b94ff9fcd486845fb0544583bf2a973246a61a0ad32340fb92d632285f1057,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached@sha256:36a0fb31978aee0ded2483de311631e64a644d0b0685b5b055f65ede7eb8e8a2,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis@sha256:5f6045841aff0fde6f684a34cdf49f8dc7b2c3bcbdeab201f1058971e0c5f79e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api@sha256:448f4e1b740c30936e340bd6e8534d78c83357bf373a4223950aa64d3484f007,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor@sha256:b68e3615af8a0eb0ef6bf9ceeef59540a6f4a9a85f6078a3620be115c73a7db8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector@sha256:7eae01cf60383e523c9cd94d158a9162120a7370829a1dad20fdea6b0fd660bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent@sha256:28cc10501788081eb61b5a1af35546191a92741f4f109df54c74e2b19439d0f9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe@sha256:9a616e37acfd120612f78043237a8541266ba34883833c9beb43f3da313661ad,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent@sha256:6b1be6cd94a0942259bca5d5d2c30cc7de4a33276b61f8ae3940226772106256,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone@sha256:02d2c22d15401574941fbe057095442dee0d6f7a0a9341de35d25e6a12a3fe4b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics@sha256:db384bf43222b066c378e77027a675d4cd9911107adba46c2922b3a55e10d6fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api@sha256:fc3b3a36b74fd653946723c54b208072d52200635850b531e9d595a7aaea5a01,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler@sha256:7850ccbff320bf9a1c9c769c1c70777eb97117dd8cd5ae4435be9b4622cf807a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share@sha256:397dac7e39cf40d14a986e6ec4a60fb698ca35c197d0db315b1318514cc6d1d4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:10452e2144368e2f128c8fb8ef9e54880b06ef1d71d
9f084a0217dcb099c51ce,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils@sha256:1c95142a36276686e720f86423ee171dc9adcc1e89879f627545b7c906ccd9bd,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:18f8463fe46fe6081d5682009e92bbcb3df33282b83b0a2857abaece795cf1ba,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api@sha256:e331a8fde6638e5ba154c4f0b38772a9a424f60656f2777245975fb1fa02f07d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute@sha256:b6e1e8a249d36ef36c6ac4170af1e043dda1ccc0f9672832d3ff151bf3533076,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor@sha256:cd3cf7a34053e850b4d4f9f4ea4c74953a54a42fd18e47d7c01d44a88923e925,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy@sha256:aee28476344fc0cc148fbe97daf9b1bfcedc22001550bba4bdc4e84be7b6989d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler@sha256:cfa0b92c976603ee2a937d34013a238fcd8aa75f998e50642e33489f14124633,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api@sha256:73c2f2d6eecf88acf4e45b133c8373d9bb006b530e0aff0b28f3b7420620a874,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager@sha256:927b405cc04abe5ff716186e8d35e2dc5fad1c8430194659ee6617d74e4e055d,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping@sha256:6154d7cebd7c339afa5b86330262156171743aa5b79c2b78f9a2f378005ed8fb,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog@sha256:e2db2f4af8d3d0be7868c6efef0189f3a2c74a8f96ae10e3f991cdf83feaef29,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker@sha256:c773629df257726a6d3cacc24a6e4df0babcd7d37df04e6d14676a8da028b9c9,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient@sha256:776211111e2e6493706dbc49a3ba44f31d1b947919313ed3a0f35810e304ec52,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather@sha256:0a98e8f5c83522ca6c8e40c5e9561f6628d2d5e69f0e8a64279c541c989d3d8b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter@sha256:ecd56e6733c475f2d441344fd98f288c3eac0261ba113695fec7520a954ccbc7,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi@sha256:7cccf24ad0a152f90ca39893064f48a1656950ee8142685a5d482c71f0bdc9f5,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller@sha256:af46761060c79
87e1dee5f14c06d85b46f12ad8e09c83d4246ab4e3a65dfda3e,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base@sha256:05450b48f6b5352b2686a26e933e8727748edae2ae9652d9164b7d7a1817c55a,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server@sha256:fc9c99eeef91523482bd8f92661b393287e1f2a24ad2ba9e33191f8de9af74cf,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd@sha256:3e4ecc02b4b5e0860482a93599ba9ca598c5ce26c093c46e701f96fe51acb208,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server@sha256:2346037e064861c7892690d2e8b3e1eea1a26ce3c3a11fda0b41301965bc828c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api@sha256:7dd2e0dbb6bb5a6cecd1763e43479ca8cb6a0c502534e83c8795c0da2b50e099,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:95d67f51dfedd5bd3ec785b488425295b2d8c41feae3e6386ef471615381809b,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account@sha256:c26c3ff9cabe3593ceb10006e782bf9391ac14785768ce9eec4f938c2d3cf228,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container@sha256:273fe8c27d08d0f62773a02f8cef6a761a7768116ee1a4be611f93bbf63f2b75,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object@sha256:daa45220bb1c47922d0917aa8fe423bb82b03a01429f1c9e37635e701e352d71,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server@sha256:a80a074e227d3238bb6f285788a9e886ae7a5909ccbc5c19c93c369bdfe5b3b8,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all@sha256:58ac66ca1be01fe0157977bd79a26cde4d0de153edfaf4162367c924826b2ef4,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api@sha256:99a63770d80cc7c3afa1118b400972fb0e6bff5284a2eae781b12582ad79c29c,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier@sha256:9ee4d84529394afcd860f1a1186484560f02f08c15c37cac42a22473b7116d5f,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine@sha256:ea15fadda7b0439ec637edfaf6ea5dbf3e35fb3be012c7c5a31e722c90becb11,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-lr7ct,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4_openstack-operators(a100268d-89c1-412b-82a6-843711bcb44b): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.540181 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" podUID="cadb4705-8655-4f69-b00f-049e64a71b28" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.552625 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" podUID="bbbb8d7a-ee06-4f2e-9982-97b6ee86801d" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.561875 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" podUID="3baa8fe0-513d-4a42-a83a-4cc6fbf0e938" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.563015 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" podUID="bbb5f842-47da-40fc-a082-50323f1f10f8" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.568555 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" podUID="0d573b45-216b-4869-96f6-c460bb7ff10f" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.601061 4693 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" event={"ID":"9ea19f26-5477-43e9-84a2-1b8cf72f4f81","Type":"ContainerStarted","Data":"c5e9da7c966dc06b1df06dce705d1dbb669107a5ea48d8d50594528c28720f9f"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.602204 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" event={"ID":"60c907e3-d225-414c-a15c-6f0a6999eb9d","Type":"ContainerStarted","Data":"372ef07921fcc24be55bfa910aea26055e2aa7ea853197536bf7b32b25480cae"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.603387 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" event={"ID":"89938523-c1a9-4f6e-aebb-396a3cd509c6","Type":"ContainerStarted","Data":"cd9cef9437fe9b150caeb4f54de804882d29dd9ea6f645cb352400139efa9a67"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.604631 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" event={"ID":"ed8b2ca0-7928-41f6-8e30-787058fa0808","Type":"ContainerStarted","Data":"de16e8704fae1116182f3ddb86f118776911c99054c6eeef2c5500fc2c7089dc"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.615134 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" event={"ID":"1b962230-a413-4f39-a8c8-bed04c898724","Type":"ContainerStarted","Data":"5802d3c40d0a4a9c9062224bb8c42b37de7eb43e27d2926f7dd878b9e872199c"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.617826 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" event={"ID":"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938","Type":"ContainerStarted","Data":"2fa51bbdc380c9a513f58dccd72ee4a60550ba0bc126aee5588799c48224bd0a"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.617886 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" event={"ID":"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938","Type":"ContainerStarted","Data":"3a0e5849c7b679a328fa6f529965c7fa567a8f2e05b692f1dfafa8f7376c415e"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.621044 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" podUID="3baa8fe0-513d-4a42-a83a-4cc6fbf0e938" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.621701 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" event={"ID":"750a5fd2-0554-4a9d-a16b-9e82cb56694f","Type":"ContainerStarted","Data":"46f34236b137b356dee73cc70ffe0df1c49b2433196d4ea7f6ff6480750ed375"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.622732 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" event={"ID":"a100268d-89c1-412b-82a6-843711bcb44b","Type":"ContainerStarted","Data":"3097f2d6853f28e4923a43e7598760cfe86aea583ff3bb806f841f2e8f9dfdc3"} Nov 22 
09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.623743 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" event={"ID":"703d454c-2336-4941-8fe9-5b717f57423f","Type":"ContainerStarted","Data":"25befaea560c8569b549c799335814f5ac2019a68a9bcc96f6263ec1c0eb0328"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.624491 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" event={"ID":"4b168504-e6a6-48c2-a8af-dc6a44c77e59","Type":"ContainerStarted","Data":"a5e3ffa965528f1dc3ba6f66207e72f9911fe2cedef2e12b09bba82f6cf8b9ce"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.625483 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" event={"ID":"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d","Type":"ContainerStarted","Data":"5e081b8e65998464a961c5665004ad28f6ea02e5f556eac55445d261d12039f5"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.625509 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" event={"ID":"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d","Type":"ContainerStarted","Data":"ffe391819bb7c9f10f5e136a3276ca4ea4329751f527c2032783ab80ebc0ba9f"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.626477 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" podUID="bbbb8d7a-ee06-4f2e-9982-97b6ee86801d" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.629764 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" event={"ID":"23476d93-a604-4bc2-9e83-5c59e574436c","Type":"ContainerStarted","Data":"46c110cb6e6bbcb8191edf6f84d1bc52433a9528d405f80ab2e12a0a6b20dc61"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.630981 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" event={"ID":"1f579f14-5558-45aa-9fa5-da9ee0ccac02","Type":"ContainerStarted","Data":"d6586ae7a4724bad877b7cfdfc8f2c83c81895e430197cca032d162365fe103d"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.631832 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" event={"ID":"d7bbf03f-0d49-4144-8ea9-0303a2e5c86e","Type":"ContainerStarted","Data":"616c58f7cbad038fe4e0aa6978afce0bdd432330ad079514aaa448eaf1b86f45"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.632810 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" podUID="d7bbf03f-0d49-4144-8ea9-0303a2e5c86e" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.634123 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" event={"ID":"e1cfaae9-e5b8-4826-9e34-6fce5657c237","Type":"ContainerStarted","Data":"e4db8c9ac9649f4a552b1be8e06535a51d013966e1adb11d077e56dc1df834ea"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.640648 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" podUID="6bad4eeb-497a-4459-af08-c6f1db9ee8bf" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.643709 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" event={"ID":"6bad4eeb-497a-4459-af08-c6f1db9ee8bf","Type":"ContainerStarted","Data":"e41b1728b2120759ee9122d210f2e38f638ceac362683ebcd8446d609ba1d355"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.647482 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" event={"ID":"f1bb1578-9697-4968-b36f-b77d228fafaa","Type":"ContainerStarted","Data":"2d9a19632c7208f8bc4effef7768cc371f36091d64a7ea0e4241dd0d2a4ffbc0"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.648688 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" event={"ID":"cadb4705-8655-4f69-b00f-049e64a71b28","Type":"ContainerStarted","Data":"df92e99da50004f157a36e0d3e15d458bc910b0d41381f61c7641c1a90c77b92"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.648720 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" event={"ID":"cadb4705-8655-4f69-b00f-049e64a71b28","Type":"ContainerStarted","Data":"023c3c06050c17a1b92c710d4a0423833f22bcf65b46b68bed1086bb32495d99"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.650232 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" podUID="cadb4705-8655-4f69-b00f-049e64a71b28" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.651343 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" event={"ID":"bbb5f842-47da-40fc-a082-50323f1f10f8","Type":"ContainerStarted","Data":"2c2a96fc6fe07715f4c9bb2259565af8b91a312c1099a32a01a01841cc182686"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.651368 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" event={"ID":"bbb5f842-47da-40fc-a082-50323f1f10f8","Type":"ContainerStarted","Data":"751e3f9425131cc42f03c8f0a0c927f47ae588ed42cb8a7ae43f1f79adcdfaf4"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.652065 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" event={"ID":"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8","Type":"ContainerStarted","Data":"05c673fe645edcc40eeb3595e0ec576e89e58b64e60db478f68261d02aab0066"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.652547 4693 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" podUID="bbb5f842-47da-40fc-a082-50323f1f10f8" Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.655765 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" event={"ID":"0d573b45-216b-4869-96f6-c460bb7ff10f","Type":"ContainerStarted","Data":"1596a4ce8fc963baa7ce405f903044b71d9c300b3b1a68e71caedf3acdb8c56b"} Nov 22 09:15:25 crc kubenswrapper[4693]: I1122 09:15:25.655794 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" event={"ID":"0d573b45-216b-4869-96f6-c460bb7ff10f","Type":"ContainerStarted","Data":"b6f5f76aa2ac71486469d93d427f85e55f93e3b7ce24c64440daba01c747c88a"} Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.657116 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96\\\"\"" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" podUID="0d573b45-216b-4869-96f6-c460bb7ff10f" Nov 22 09:15:25 crc kubenswrapper[4693]: E1122 09:15:25.659239 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" podUID="a100268d-89c1-412b-82a6-843711bcb44b" Nov 22 09:15:26 crc kubenswrapper[4693]: I1122 09:15:26.181004 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:26 crc kubenswrapper[4693]: I1122 09:15:26.185597 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/261611c7-97d3-444c-bba5-e06e1593a5e4-cert\") pod \"openstack-operator-controller-manager-6cb9dc54f8-qv89g\" (UID: \"261611c7-97d3-444c-bba5-e06e1593a5e4\") " pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:26 crc kubenswrapper[4693]: I1122 09:15:26.232257 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:26 crc kubenswrapper[4693]: I1122 09:15:26.664143 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" event={"ID":"a100268d-89c1-412b-82a6-843711bcb44b","Type":"ContainerStarted","Data":"c141de10cdf176f06ab113bbd3d856a73148f7e5ef8fe2cbdda16319cc556d99"} Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.665694 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" podUID="a100268d-89c1-412b-82a6-843711bcb44b" Nov 22 09:15:26 crc kubenswrapper[4693]: I1122 09:15:26.666956 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" event={"ID":"6bad4eeb-497a-4459-af08-c6f1db9ee8bf","Type":"ContainerStarted","Data":"fede8f30e415b10d9b98b2b9e8e452f7e36fbf3b2056925592b2c5dbb624778e"} Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.668920 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/cinder-operator@sha256:553b1288b330ad05771d59c6b73c1681c95f457e8475682f9ad0d2e6b85f37e9\\\"\"" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" podUID="cadb4705-8655-4f69-b00f-049e64a71b28" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.669294 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" podUID="d7bbf03f-0d49-4144-8ea9-0303a2e5c86e" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.672836 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:5d49d4594c66eda7b151746cc6e1d3c67c0129b4503eeb043a64ae8ec2da6a1b\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" podUID="bbbb8d7a-ee06-4f2e-9982-97b6ee86801d" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.673825 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/placement-operator@sha256:4094e7fc11a33e8e2b6768a053cafaf5b122446d23f9113d43d520cb64e9776c\\\"\"" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" podUID="bbb5f842-47da-40fc-a082-50323f1f10f8" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.674134 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" podUID="6bad4eeb-497a-4459-af08-c6f1db9ee8bf" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.674571 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:82207e753574d4be246f86c4b074500d66cf20214aa80f0a8525cf3287a35e6d\\\"\"" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" podUID="3baa8fe0-513d-4a42-a83a-4cc6fbf0e938" Nov 22 09:15:26 crc kubenswrapper[4693]: E1122 09:15:26.703196 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:5edd825a235f5784d9a65892763c5388c39df1731d0fcbf4ee33408b8c83ac96\\\"\"" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" podUID="0d573b45-216b-4869-96f6-c460bb7ff10f" Nov 22 09:15:27 crc kubenswrapper[4693]: E1122 09:15:27.674531 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:78852f8ba332a5756c1551c126157f735279101a0fc3277ba4aa4db3478789dd\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" podUID="a100268d-89c1-412b-82a6-843711bcb44b" Nov 22 09:15:27 crc kubenswrapper[4693]: E1122 09:15:27.675426 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:442c269d79163f8da75505019c02e9f0815837aaadcaddacb8e6c12df297ca13\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" podUID="6bad4eeb-497a-4459-af08-c6f1db9ee8bf" Nov 22 09:15:30 crc kubenswrapper[4693]: I1122 09:15:30.246178 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:15:30 crc kubenswrapper[4693]: I1122 09:15:30.246386 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.184038 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g"] Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.745183 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" event={"ID":"390da39a-b184-4348-9894-af8f4237aba8","Type":"ContainerStarted","Data":"4faf8ef4eb62afd3b858b6978285dc372464001c524df189d594a371e2a3865a"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.745322 4693 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" event={"ID":"390da39a-b184-4348-9894-af8f4237aba8","Type":"ContainerStarted","Data":"6568f03acae839a5b87e03464b44f96a7547e108bf87ba0094e419edf503826d"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.746070 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.750873 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" event={"ID":"4b168504-e6a6-48c2-a8af-dc6a44c77e59","Type":"ContainerStarted","Data":"52074ae4a02fec975814ff343bcdb7736b98688a8228d7d97229613d95b1382d"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.752191 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.752294 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" event={"ID":"4b168504-e6a6-48c2-a8af-dc6a44c77e59","Type":"ContainerStarted","Data":"f895bb969eeccd2f16d403d905d4568cba2412ac5c754ce8afe5e5ca8c667ee3"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.753482 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" event={"ID":"f1bb1578-9697-4968-b36f-b77d228fafaa","Type":"ContainerStarted","Data":"7384325d895617ac44309dc50b6dddca2913cf29c7fd3df4672753fedc979e7f"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.760016 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" event={"ID":"23476d93-a604-4bc2-9e83-5c59e574436c","Type":"ContainerStarted","Data":"2dca545889e6cf5fc69891abed79277daabd12043fb8b6c3da941820e760f6e1"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.760067 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" event={"ID":"23476d93-a604-4bc2-9e83-5c59e574436c","Type":"ContainerStarted","Data":"db0ed4bc0f2f062ffcb476165ff39f28d81c18dfbe27d4e23fbdfb3ee700f340"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.760546 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.770408 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" event={"ID":"261611c7-97d3-444c-bba5-e06e1593a5e4","Type":"ContainerStarted","Data":"3237666403d4376fb254e7ba0de1025e79acf8da4eb67265d223ab90f1acc59f"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.784597 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" event={"ID":"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8","Type":"ContainerStarted","Data":"805638958bb34df032cb10ea32bd5d5ef91107ba442e6d401a8e129ff9764a00"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.784719 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" 
event={"ID":"9acf7acc-6712-4fbc-ab1c-14a9e1076ab8","Type":"ContainerStarted","Data":"b108e5e78ec5b54157f745de4147b7a6a47b644a655182f9d9674a687f78fe13"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.785328 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.783742 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" podStartSLOduration=2.801742548 podStartE2EDuration="9.783721777s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:24.517364414 +0000 UTC m=+720.659866706" lastFinishedPulling="2025-11-22 09:15:31.499343643 +0000 UTC m=+727.641845935" observedRunningTime="2025-11-22 09:15:32.776033085 +0000 UTC m=+728.918535376" watchObservedRunningTime="2025-11-22 09:15:32.783721777 +0000 UTC m=+728.926224089" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.797832 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" event={"ID":"e1cfaae9-e5b8-4826-9e34-6fce5657c237","Type":"ContainerStarted","Data":"68063b1e02f0e54ba506dd7a90d1a60d2bc68f169d408d469733254f3f2b80d1"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.805800 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" podStartSLOduration=3.026327948 podStartE2EDuration="9.805789566s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.056373575 +0000 UTC m=+721.198875866" lastFinishedPulling="2025-11-22 09:15:31.835835192 +0000 UTC m=+727.978337484" observedRunningTime="2025-11-22 09:15:32.803214186 +0000 UTC m=+728.945716476" watchObservedRunningTime="2025-11-22 09:15:32.805789566 +0000 UTC m=+728.948291857" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.810064 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" event={"ID":"60c907e3-d225-414c-a15c-6f0a6999eb9d","Type":"ContainerStarted","Data":"a885915c36ae3528989c47ac4f57253cb9f9a938c9aa0ef9870e471bb71f6402"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.810154 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" event={"ID":"60c907e3-d225-414c-a15c-6f0a6999eb9d","Type":"ContainerStarted","Data":"03ead8105b9237a30f67dec2624fc1f3aaae49be3cd2a408731e2205b19dde0f"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.810515 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.815217 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" event={"ID":"1f579f14-5558-45aa-9fa5-da9ee0ccac02","Type":"ContainerStarted","Data":"6f15c291ae86dbfa90d0620e2a45b18a5eb78d537681b3c2b807a781cf4eede0"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.816717 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" 
event={"ID":"89938523-c1a9-4f6e-aebb-396a3cd509c6","Type":"ContainerStarted","Data":"44afeeb8d1ebf2353be9dd243ae065848bd71c9ee93abf9afe7e61605c7f0997"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.820127 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" event={"ID":"ed8b2ca0-7928-41f6-8e30-787058fa0808","Type":"ContainerStarted","Data":"7aacae867bb7124019f99de84b198544f7a3f08bcf89ab85a7768dec03388799"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.823755 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" event={"ID":"1b962230-a413-4f39-a8c8-bed04c898724","Type":"ContainerStarted","Data":"db2c7e78fccd0502a69c0efe222743b4be11d45197ece0bf12044e8eceeb7637"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.825387 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" event={"ID":"703d454c-2336-4941-8fe9-5b717f57423f","Type":"ContainerStarted","Data":"5a8e4a43ebf54e70c8aabd406dd46ecd2b970525475ec99287eae372f905e3b3"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.830109 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" event={"ID":"750a5fd2-0554-4a9d-a16b-9e82cb56694f","Type":"ContainerStarted","Data":"f8d5003812e927737756a120b1ff0bc0e65f67f0c149cd51dc7e5c1fe2ba8be5"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.831003 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" podStartSLOduration=3.095381191 podStartE2EDuration="9.830985064s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.100264623 +0000 UTC m=+721.242766913" lastFinishedPulling="2025-11-22 09:15:31.835868495 +0000 UTC m=+727.978370786" observedRunningTime="2025-11-22 09:15:32.82989266 +0000 UTC m=+728.972394951" watchObservedRunningTime="2025-11-22 09:15:32.830985064 +0000 UTC m=+728.973487354" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.835549 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" event={"ID":"9ea19f26-5477-43e9-84a2-1b8cf72f4f81","Type":"ContainerStarted","Data":"841e2cba6a6873f640a8b1367cc92957110acd5f7ab03d4e197a8a7b55ccf028"} Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.863932 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" podStartSLOduration=3.092644456 podStartE2EDuration="9.863912092s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.063792629 +0000 UTC m=+721.206294920" lastFinishedPulling="2025-11-22 09:15:31.835060265 +0000 UTC m=+727.977562556" observedRunningTime="2025-11-22 09:15:32.862345077 +0000 UTC m=+729.004847368" watchObservedRunningTime="2025-11-22 09:15:32.863912092 +0000 UTC m=+729.006414384" Nov 22 09:15:32 crc kubenswrapper[4693]: I1122 09:15:32.891934 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" podStartSLOduration=2.745134036 podStartE2EDuration="9.891918363s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" 
firstStartedPulling="2025-11-22 09:15:24.678046542 +0000 UTC m=+720.820548833" lastFinishedPulling="2025-11-22 09:15:31.824830868 +0000 UTC m=+727.967333160" observedRunningTime="2025-11-22 09:15:32.889073527 +0000 UTC m=+729.031575817" watchObservedRunningTime="2025-11-22 09:15:32.891918363 +0000 UTC m=+729.034420654" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.842742 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" event={"ID":"f1bb1578-9697-4968-b36f-b77d228fafaa","Type":"ContainerStarted","Data":"3bc5244967a1699a9689c46f6b307776e4cb45612768e7628c3acc36c5abeab4"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.844622 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" event={"ID":"750a5fd2-0554-4a9d-a16b-9e82cb56694f","Type":"ContainerStarted","Data":"70c387c77405ef60122fa6840a2dc636efd647398dd42d4ba38fcb371ecc018f"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.844735 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.846199 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" event={"ID":"9ea19f26-5477-43e9-84a2-1b8cf72f4f81","Type":"ContainerStarted","Data":"85025fb7798e14ba367077adab2cc37619f4fef10043ea4203694c067e6f0bc9"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.846417 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.847705 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" event={"ID":"1f579f14-5558-45aa-9fa5-da9ee0ccac02","Type":"ContainerStarted","Data":"44c6e5bc4ffbddf4d43c08f0fa8c1d1b3ea6ab2e0515ec0d33dee14eeb861c71"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.847979 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.849539 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" event={"ID":"261611c7-97d3-444c-bba5-e06e1593a5e4","Type":"ContainerStarted","Data":"1aeb456822d095d348091a9489fab6490a028d1ccb193d9624d70b8a719b2e2a"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.849568 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" event={"ID":"261611c7-97d3-444c-bba5-e06e1593a5e4","Type":"ContainerStarted","Data":"73abff1df74fbb487ab3b3a855783af3b71f743486d886f626953f9f9fc4fc18"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.849632 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.851018 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" 
event={"ID":"89938523-c1a9-4f6e-aebb-396a3cd509c6","Type":"ContainerStarted","Data":"e99d55d1c1b525b5ec1030726ecc17597ac7115b6e34b11387e3dc9c1e591450"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.851164 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.852718 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" event={"ID":"ed8b2ca0-7928-41f6-8e30-787058fa0808","Type":"ContainerStarted","Data":"e4d932fd491ba3c3629a25c3f4f0701bd60587a09af1cb8f2d0adf57a477e2b4"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.852883 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.856127 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" event={"ID":"1b962230-a413-4f39-a8c8-bed04c898724","Type":"ContainerStarted","Data":"7df910ca415ac26135887800d0d03d38d976b49a6178a63493b49e5f656570cd"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.856576 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.858462 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" event={"ID":"703d454c-2336-4941-8fe9-5b717f57423f","Type":"ContainerStarted","Data":"94a556f26b89673c4ad82f0a9d502eb6fe74c9e2eed2ef50857a32bf553c82b0"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.858536 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.860196 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" podStartSLOduration=4.384015984 podStartE2EDuration="10.860173373s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.35944689 +0000 UTC m=+721.501949181" lastFinishedPulling="2025-11-22 09:15:31.835604278 +0000 UTC m=+727.978106570" observedRunningTime="2025-11-22 09:15:33.856796586 +0000 UTC m=+729.999298866" watchObservedRunningTime="2025-11-22 09:15:33.860173373 +0000 UTC m=+730.002675664" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.860393 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" event={"ID":"e1cfaae9-e5b8-4826-9e34-6fce5657c237","Type":"ContainerStarted","Data":"2f1ae9d260b01373c8d0f5b8c3d757e1224d3835cf88f9ff83c5ff8d5121de38"} Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.861036 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.871195 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" podStartSLOduration=4.073659342 podStartE2EDuration="10.871184981s" 
podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.052376702 +0000 UTC m=+721.194878994" lastFinishedPulling="2025-11-22 09:15:31.849902343 +0000 UTC m=+727.992404633" observedRunningTime="2025-11-22 09:15:33.867083791 +0000 UTC m=+730.009586082" watchObservedRunningTime="2025-11-22 09:15:33.871184981 +0000 UTC m=+730.013687271" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.877347 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" podStartSLOduration=4.110163683 podStartE2EDuration="10.877334499s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.08924496 +0000 UTC m=+721.231747251" lastFinishedPulling="2025-11-22 09:15:31.856415775 +0000 UTC m=+727.998918067" observedRunningTime="2025-11-22 09:15:33.876891576 +0000 UTC m=+730.019393868" watchObservedRunningTime="2025-11-22 09:15:33.877334499 +0000 UTC m=+730.019836790" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.885918 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" podStartSLOduration=3.409196744 podStartE2EDuration="9.885902623s" podCreationTimestamp="2025-11-22 09:15:24 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.37619483 +0000 UTC m=+721.518697121" lastFinishedPulling="2025-11-22 09:15:31.852900709 +0000 UTC m=+727.995403000" observedRunningTime="2025-11-22 09:15:33.885074957 +0000 UTC m=+730.027577248" watchObservedRunningTime="2025-11-22 09:15:33.885902623 +0000 UTC m=+730.028404914" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.902934 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" podStartSLOduration=9.902926241 podStartE2EDuration="9.902926241s" podCreationTimestamp="2025-11-22 09:15:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:15:33.900369725 +0000 UTC m=+730.042872017" watchObservedRunningTime="2025-11-22 09:15:33.902926241 +0000 UTC m=+730.045428532" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.913329 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" podStartSLOduration=4.428656681 podStartE2EDuration="10.913315959s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.370117647 +0000 UTC m=+721.512619938" lastFinishedPulling="2025-11-22 09:15:31.854776925 +0000 UTC m=+727.997279216" observedRunningTime="2025-11-22 09:15:33.910741731 +0000 UTC m=+730.053244022" watchObservedRunningTime="2025-11-22 09:15:33.913315959 +0000 UTC m=+730.055818251" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.921653 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" podStartSLOduration=3.419619125 podStartE2EDuration="9.921645956s" podCreationTimestamp="2025-11-22 09:15:24 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.366981532 +0000 UTC m=+721.509483824" lastFinishedPulling="2025-11-22 09:15:31.869008364 +0000 UTC m=+728.011510655" observedRunningTime="2025-11-22 09:15:33.920480156 +0000 UTC m=+730.062982446" watchObservedRunningTime="2025-11-22 
09:15:33.921645956 +0000 UTC m=+730.064148248" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.933223 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" podStartSLOduration=4.211182847 podStartE2EDuration="10.933216023s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.113725203 +0000 UTC m=+721.256227493" lastFinishedPulling="2025-11-22 09:15:31.835758378 +0000 UTC m=+727.978260669" observedRunningTime="2025-11-22 09:15:33.93040994 +0000 UTC m=+730.072912230" watchObservedRunningTime="2025-11-22 09:15:33.933216023 +0000 UTC m=+730.075718315" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.945289 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" podStartSLOduration=3.791038196 podStartE2EDuration="10.945277705s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:24.681193437 +0000 UTC m=+720.823695728" lastFinishedPulling="2025-11-22 09:15:31.835432946 +0000 UTC m=+727.977935237" observedRunningTime="2025-11-22 09:15:33.94081153 +0000 UTC m=+730.083313820" watchObservedRunningTime="2025-11-22 09:15:33.945277705 +0000 UTC m=+730.087779996" Nov 22 09:15:33 crc kubenswrapper[4693]: I1122 09:15:33.951164 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" podStartSLOduration=4.167862394 podStartE2EDuration="10.951158178s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.05212544 +0000 UTC m=+721.194627731" lastFinishedPulling="2025-11-22 09:15:31.835421224 +0000 UTC m=+727.977923515" observedRunningTime="2025-11-22 09:15:33.949904751 +0000 UTC m=+730.092407042" watchObservedRunningTime="2025-11-22 09:15:33.951158178 +0000 UTC m=+730.093660468" Nov 22 09:15:34 crc kubenswrapper[4693]: I1122 09:15:34.295798 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.897095 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" event={"ID":"cadb4705-8655-4f69-b00f-049e64a71b28","Type":"ContainerStarted","Data":"dafc9a326b2a4b037255c66839083388cb1d4d3707ce64375aa5db1c89447876"} Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.897658 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.902257 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" event={"ID":"bbb5f842-47da-40fc-a082-50323f1f10f8","Type":"ContainerStarted","Data":"ac0b746e73106d5d0e5ed52166b964a67cb4f9caa72ea66ca126aeb4f8089c5f"} Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.902531 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.916906 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" podStartSLOduration=2.584693684 podStartE2EDuration="17.916894304s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.379303223 +0000 UTC m=+721.521805514" lastFinishedPulling="2025-11-22 09:15:40.711503843 +0000 UTC m=+736.854006134" observedRunningTime="2025-11-22 09:15:40.915151128 +0000 UTC m=+737.057653439" watchObservedRunningTime="2025-11-22 09:15:40.916894304 +0000 UTC m=+737.059396595" Nov 22 09:15:40 crc kubenswrapper[4693]: I1122 09:15:40.931528 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" podStartSLOduration=2.612531538 podStartE2EDuration="17.931513452s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.39314021 +0000 UTC m=+721.535642502" lastFinishedPulling="2025-11-22 09:15:40.712122125 +0000 UTC m=+736.854624416" observedRunningTime="2025-11-22 09:15:40.928987435 +0000 UTC m=+737.071489726" watchObservedRunningTime="2025-11-22 09:15:40.931513452 +0000 UTC m=+737.074015744" Nov 22 09:15:43 crc kubenswrapper[4693]: I1122 09:15:43.931928 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" event={"ID":"d7bbf03f-0d49-4144-8ea9-0303a2e5c86e","Type":"ContainerStarted","Data":"759f222f4ec36449c101dc5ada4b63aeef44b8133e3c03277b594463058f222d"} Nov 22 09:15:43 crc kubenswrapper[4693]: I1122 09:15:43.951724 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn" podStartSLOduration=2.337175968 podStartE2EDuration="19.95170152s" podCreationTimestamp="2025-11-22 09:15:24 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.391968458 +0000 UTC m=+721.534470749" lastFinishedPulling="2025-11-22 09:15:43.00649401 +0000 UTC m=+739.148996301" observedRunningTime="2025-11-22 09:15:43.94779147 +0000 UTC m=+740.090293762" watchObservedRunningTime="2025-11-22 09:15:43.95170152 +0000 UTC m=+740.094203831" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.013671 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-56dfb6b67f-r7t48" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.044332 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-8667fbf6f6-xh2cl" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.097157 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-5d86b44686-j6jzp" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.167879 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-769d9c7585-9g6fk" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.180286 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5c75d7c94b-92vqr" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.197350 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-7bb88cb858-gm5p8" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.214267 4693 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-7879fb76fd-5xnb4" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.231146 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-6f8c5b86cb-t6hdc" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.256155 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-66b7d6f598-wd7tg" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.287128 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-86d796d84d-k8mcb" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.304220 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-7768f8c84f-c42f7" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.421984 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-799cb6ffd6-vnrh2" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.441333 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-7798859c74-kz8xk" Nov 22 09:15:44 crc kubenswrapper[4693]: I1122 09:15:44.636103 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-7cd4fb6f79-298gp" Nov 22 09:15:46 crc kubenswrapper[4693]: I1122 09:15:46.238892 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-6cb9dc54f8-qv89g" Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.969118 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" event={"ID":"3baa8fe0-513d-4a42-a83a-4cc6fbf0e938","Type":"ContainerStarted","Data":"6e13640e1197150c92db628f832996df4bd2ae3503566fd337fd634f95e116ed"} Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.969794 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.971476 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" event={"ID":"0d573b45-216b-4869-96f6-c460bb7ff10f","Type":"ContainerStarted","Data":"2a046df110691bd53f089bd48ee84e15c1d60c325abc094beb2f17981393c33a"} Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.971695 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.973462 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" event={"ID":"bbbb8d7a-ee06-4f2e-9982-97b6ee86801d","Type":"ContainerStarted","Data":"b218429402616689d863c49defad7130835350febb18e8dbc4b793da6cd1d8d7"} Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.973727 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:49 crc 
kubenswrapper[4693]: I1122 09:15:49.975639 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" event={"ID":"6bad4eeb-497a-4459-af08-c6f1db9ee8bf","Type":"ContainerStarted","Data":"37c9b180ce0d19f616eb31e94a54c041d9b2f48f4f736c88d532a9e24f5d0acf"} Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.975828 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.982396 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" podStartSLOduration=1.752813671 podStartE2EDuration="25.982384414s" podCreationTimestamp="2025-11-22 09:15:24 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.378036282 +0000 UTC m=+721.520538572" lastFinishedPulling="2025-11-22 09:15:49.607607024 +0000 UTC m=+745.750109315" observedRunningTime="2025-11-22 09:15:49.979787784 +0000 UTC m=+746.122290074" watchObservedRunningTime="2025-11-22 09:15:49.982384414 +0000 UTC m=+746.124886706" Nov 22 09:15:49 crc kubenswrapper[4693]: I1122 09:15:49.997066 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" podStartSLOduration=2.8583510199999997 podStartE2EDuration="26.997054739s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.389700124 +0000 UTC m=+721.532202415" lastFinishedPulling="2025-11-22 09:15:49.528403844 +0000 UTC m=+745.670906134" observedRunningTime="2025-11-22 09:15:49.992894368 +0000 UTC m=+746.135396660" watchObservedRunningTime="2025-11-22 09:15:49.997054739 +0000 UTC m=+746.139557030" Nov 22 09:15:50 crc kubenswrapper[4693]: I1122 09:15:50.010048 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" podStartSLOduration=2.851178309 podStartE2EDuration="27.010032783s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.381979934 +0000 UTC m=+721.524482225" lastFinishedPulling="2025-11-22 09:15:49.540834408 +0000 UTC m=+745.683336699" observedRunningTime="2025-11-22 09:15:50.0056847 +0000 UTC m=+746.148186991" watchObservedRunningTime="2025-11-22 09:15:50.010032783 +0000 UTC m=+746.152535074" Nov 22 09:15:50 crc kubenswrapper[4693]: I1122 09:15:50.034387 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" podStartSLOduration=2.889138737 podStartE2EDuration="27.034361741s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.410540195 +0000 UTC m=+721.553042486" lastFinishedPulling="2025-11-22 09:15:49.555763199 +0000 UTC m=+745.698265490" observedRunningTime="2025-11-22 09:15:50.032055427 +0000 UTC m=+746.174557717" watchObservedRunningTime="2025-11-22 09:15:50.034361741 +0000 UTC m=+746.176864022" Nov 22 09:15:50 crc kubenswrapper[4693]: I1122 09:15:50.982995 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" event={"ID":"a100268d-89c1-412b-82a6-843711bcb44b","Type":"ContainerStarted","Data":"1618b73c28a91bff9a5ef559d34ef624e5b14c24262b3ab346acb5c13af1df15"} Nov 22 09:15:51 crc 
kubenswrapper[4693]: I1122 09:15:51.007068 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" podStartSLOduration=3.017071559 podStartE2EDuration="28.007051135s" podCreationTimestamp="2025-11-22 09:15:23 +0000 UTC" firstStartedPulling="2025-11-22 09:15:25.421091718 +0000 UTC m=+721.563594009" lastFinishedPulling="2025-11-22 09:15:50.411071293 +0000 UTC m=+746.553573585" observedRunningTime="2025-11-22 09:15:51.003494901 +0000 UTC m=+747.145997192" watchObservedRunningTime="2025-11-22 09:15:51.007051135 +0000 UTC m=+747.149553426" Nov 22 09:15:52 crc kubenswrapper[4693]: I1122 09:15:52.187577 4693 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.294798 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-6fdc856c5d-q68sw" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.299898 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-6d8fd67bf7-lz798" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.391985 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-bf4c6585d-vts55" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.411043 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-5bdf4f7f7f-h6k74" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.427820 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-6dc664666c-mqzzr" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.486912 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-8464cf66df-cl5qf" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.972201 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.973493 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.985898 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:15:54 crc kubenswrapper[4693]: I1122 09:15:54.997902 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.146403 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.146455 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.146816 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs5lq\" (UniqueName: \"kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.248207 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qs5lq\" (UniqueName: \"kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.248304 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.248337 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.248797 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.248829 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content\") pod 
\"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.271604 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs5lq\" (UniqueName: \"kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq\") pod \"certified-operators-j9c7f\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.288780 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:15:55 crc kubenswrapper[4693]: I1122 09:15:55.678930 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:15:55 crc kubenswrapper[4693]: W1122 09:15:55.685296 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb7ef8bf6_cd05_49b2_a96b_5cbeadb946b3.slice/crio-7a7d98c162d955a8fd893f5b288221b6454ed4e645db665a7355f6ad07de3d0c WatchSource:0}: Error finding container 7a7d98c162d955a8fd893f5b288221b6454ed4e645db665a7355f6ad07de3d0c: Status 404 returned error can't find the container with id 7a7d98c162d955a8fd893f5b288221b6454ed4e645db665a7355f6ad07de3d0c Nov 22 09:15:56 crc kubenswrapper[4693]: I1122 09:15:56.011543 4693 generic.go:334] "Generic (PLEG): container finished" podID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerID="e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374" exitCode=0 Nov 22 09:15:56 crc kubenswrapper[4693]: I1122 09:15:56.011734 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerDied","Data":"e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374"} Nov 22 09:15:56 crc kubenswrapper[4693]: I1122 09:15:56.011798 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerStarted","Data":"7a7d98c162d955a8fd893f5b288221b6454ed4e645db665a7355f6ad07de3d0c"} Nov 22 09:15:57 crc kubenswrapper[4693]: I1122 09:15:57.018324 4693 generic.go:334] "Generic (PLEG): container finished" podID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerID="0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc" exitCode=0 Nov 22 09:15:57 crc kubenswrapper[4693]: I1122 09:15:57.018413 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerDied","Data":"0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc"} Nov 22 09:15:58 crc kubenswrapper[4693]: I1122 09:15:58.025495 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerStarted","Data":"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7"} Nov 22 09:15:58 crc kubenswrapper[4693]: I1122 09:15:58.039620 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j9c7f" podStartSLOduration=2.550506548 podStartE2EDuration="4.039605695s" podCreationTimestamp="2025-11-22 09:15:54 +0000 UTC" 
firstStartedPulling="2025-11-22 09:15:56.013241677 +0000 UTC m=+752.155743968" lastFinishedPulling="2025-11-22 09:15:57.502340824 +0000 UTC m=+753.644843115" observedRunningTime="2025-11-22 09:15:58.037792848 +0000 UTC m=+754.180295139" watchObservedRunningTime="2025-11-22 09:15:58.039605695 +0000 UTC m=+754.182107986" Nov 22 09:16:00 crc kubenswrapper[4693]: I1122 09:16:00.246380 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:16:00 crc kubenswrapper[4693]: I1122 09:16:00.246701 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:16:00 crc kubenswrapper[4693]: I1122 09:16:00.246744 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:16:00 crc kubenswrapper[4693]: I1122 09:16:00.247237 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:16:00 crc kubenswrapper[4693]: I1122 09:16:00.247288 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7" gracePeriod=600 Nov 22 09:16:01 crc kubenswrapper[4693]: I1122 09:16:01.041965 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7" exitCode=0 Nov 22 09:16:01 crc kubenswrapper[4693]: I1122 09:16:01.041989 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7"} Nov 22 09:16:01 crc kubenswrapper[4693]: I1122 09:16:01.042403 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f"} Nov 22 09:16:01 crc kubenswrapper[4693]: I1122 09:16:01.042421 4693 scope.go:117] "RemoveContainer" containerID="329ed12425e201bb8bae43071b579a5f24384df493cc4b6bb11f0437b6b63f96" Nov 22 09:16:05 crc kubenswrapper[4693]: I1122 09:16:05.002161 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4" Nov 22 09:16:05 crc kubenswrapper[4693]: I1122 09:16:05.289472 4693 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:05 crc kubenswrapper[4693]: I1122 09:16:05.289515 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:05 crc kubenswrapper[4693]: I1122 09:16:05.324043 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:06 crc kubenswrapper[4693]: I1122 09:16:06.098810 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:06 crc kubenswrapper[4693]: I1122 09:16:06.129729 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:16:07 crc kubenswrapper[4693]: I1122 09:16:07.951690 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xpsxb"] Nov 22 09:16:07 crc kubenswrapper[4693]: I1122 09:16:07.953007 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:07 crc kubenswrapper[4693]: I1122 09:16:07.958684 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xpsxb"] Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.001708 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ddkmp\" (UniqueName: \"kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.001806 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.001863 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.076985 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j9c7f" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="registry-server" containerID="cri-o://f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7" gracePeriod=2 Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.103473 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.103512 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.103574 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ddkmp\" (UniqueName: \"kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.103932 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.103972 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.120916 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ddkmp\" (UniqueName: \"kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp\") pod \"community-operators-xpsxb\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") " pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.266508 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.453469 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.504623 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xpsxb"] Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.517674 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content\") pod \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.517710 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities\") pod \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.517735 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs5lq\" (UniqueName: \"kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq\") pod \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\" (UID: \"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3\") " Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.519182 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities" (OuterVolumeSpecName: "utilities") pod "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" (UID: "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.521447 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq" (OuterVolumeSpecName: "kube-api-access-qs5lq") pod "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" (UID: "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3"). InnerVolumeSpecName "kube-api-access-qs5lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.553998 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" (UID: "b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.618887 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.618913 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:08 crc kubenswrapper[4693]: I1122 09:16:08.618923 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs5lq\" (UniqueName: \"kubernetes.io/projected/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3-kube-api-access-qs5lq\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.091873 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerID="78fce862412920005a3b97f5e652bd271488a02c080fbb919f18e202987683a2" exitCode=0 Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.091966 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerDied","Data":"78fce862412920005a3b97f5e652bd271488a02c080fbb919f18e202987683a2"} Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.092388 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerStarted","Data":"cab9818f94f63409de313f627a5db42a563287eefa29fc93fda1d550d68e944a"} Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.095719 4693 generic.go:334] "Generic (PLEG): container finished" podID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerID="f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7" exitCode=0 Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.095756 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerDied","Data":"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7"} Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.095778 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j9c7f" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.095789 4693 scope.go:117] "RemoveContainer" containerID="f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.095780 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j9c7f" event={"ID":"b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3","Type":"ContainerDied","Data":"7a7d98c162d955a8fd893f5b288221b6454ed4e645db665a7355f6ad07de3d0c"} Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.114147 4693 scope.go:117] "RemoveContainer" containerID="0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.120009 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.122366 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j9c7f"] Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.146291 4693 scope.go:117] "RemoveContainer" containerID="e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.158345 4693 scope.go:117] "RemoveContainer" containerID="f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7" Nov 22 09:16:09 crc kubenswrapper[4693]: E1122 09:16:09.158735 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7\": container with ID starting with f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7 not found: ID does not exist" containerID="f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.158806 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7"} err="failed to get container status \"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7\": rpc error: code = NotFound desc = could not find container \"f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7\": container with ID starting with f9b16a2181e4cf861d61857f12d10db11e6d4bf3a70cb00d0feaa422cdf723a7 not found: ID does not exist" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.158870 4693 scope.go:117] "RemoveContainer" containerID="0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc" Nov 22 09:16:09 crc kubenswrapper[4693]: E1122 09:16:09.159221 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc\": container with ID starting with 0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc not found: ID does not exist" containerID="0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.159261 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc"} err="failed to get container status \"0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc\": rpc error: code = NotFound desc = could not find 
container \"0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc\": container with ID starting with 0f6c8ed20e574892212f83f9cd105510d2eec8173439709b8a03eb0f2256fffc not found: ID does not exist" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.159279 4693 scope.go:117] "RemoveContainer" containerID="e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374" Nov 22 09:16:09 crc kubenswrapper[4693]: E1122 09:16:09.159572 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374\": container with ID starting with e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374 not found: ID does not exist" containerID="e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374" Nov 22 09:16:09 crc kubenswrapper[4693]: I1122 09:16:09.159611 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374"} err="failed to get container status \"e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374\": rpc error: code = NotFound desc = could not find container \"e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374\": container with ID starting with e06c877a657b1cee4933a46a327670d9193368b519684ba1e43d20e928c5f374 not found: ID does not exist" Nov 22 09:16:10 crc kubenswrapper[4693]: I1122 09:16:10.103509 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerStarted","Data":"a475d84bc2fb900b2bc4568451d56f2a3f549712e667a0d2f59f756ff5f16273"} Nov 22 09:16:10 crc kubenswrapper[4693]: I1122 09:16:10.153468 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" path="/var/lib/kubelet/pods/b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3/volumes" Nov 22 09:16:11 crc kubenswrapper[4693]: I1122 09:16:11.110980 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerID="a475d84bc2fb900b2bc4568451d56f2a3f549712e667a0d2f59f756ff5f16273" exitCode=0 Nov 22 09:16:11 crc kubenswrapper[4693]: I1122 09:16:11.111028 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerDied","Data":"a475d84bc2fb900b2bc4568451d56f2a3f549712e667a0d2f59f756ff5f16273"} Nov 22 09:16:12 crc kubenswrapper[4693]: I1122 09:16:12.117701 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerStarted","Data":"fbf15b567c70e11c444ade9f4d7030302b2777ca992667a2a2b66745652b5b69"} Nov 22 09:16:12 crc kubenswrapper[4693]: I1122 09:16:12.131982 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xpsxb" podStartSLOduration=2.415142303 podStartE2EDuration="5.131968722s" podCreationTimestamp="2025-11-22 09:16:07 +0000 UTC" firstStartedPulling="2025-11-22 09:16:09.093415606 +0000 UTC m=+765.235917897" lastFinishedPulling="2025-11-22 09:16:11.810242035 +0000 UTC m=+767.952744316" observedRunningTime="2025-11-22 09:16:12.129583558 +0000 UTC m=+768.272085849" watchObservedRunningTime="2025-11-22 09:16:12.131968722 +0000 UTC m=+768.274471012" Nov 
22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.149471 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"] Nov 22 09:16:15 crc kubenswrapper[4693]: E1122 09:16:15.150021 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="registry-server" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.150032 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="registry-server" Nov 22 09:16:15 crc kubenswrapper[4693]: E1122 09:16:15.150052 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="extract-content" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.150058 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="extract-content" Nov 22 09:16:15 crc kubenswrapper[4693]: E1122 09:16:15.150071 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="extract-utilities" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.150077 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="extract-utilities" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.150186 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7ef8bf6-cd05-49b2-a96b-5cbeadb946b3" containerName="registry-server" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.151090 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.160374 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"] Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.300383 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.300473 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.300657 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.401784 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " 
pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.401826 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.401899 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.402298 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.402377 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.418514 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9\") pod \"redhat-marketplace-vgkkr\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.463306 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:15 crc kubenswrapper[4693]: I1122 09:16:15.864070 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"] Nov 22 09:16:16 crc kubenswrapper[4693]: I1122 09:16:16.145916 4693 generic.go:334] "Generic (PLEG): container finished" podID="d07889e1-fb32-44b5-93dd-645802cb80df" containerID="f43b91cf9c2a421408f5c754a4c09c88fed459adb73e610b4b294078e22c6395" exitCode=0 Nov 22 09:16:16 crc kubenswrapper[4693]: I1122 09:16:16.153747 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerDied","Data":"f43b91cf9c2a421408f5c754a4c09c88fed459adb73e610b4b294078e22c6395"} Nov 22 09:16:16 crc kubenswrapper[4693]: I1122 09:16:16.153785 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerStarted","Data":"50cd6af7dd668dd2fb5a3ce2e00c80c7fd771c753e1243ab22862a3d8fcbcbea"} Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.157936 4693 generic.go:334] "Generic (PLEG): container finished" podID="d07889e1-fb32-44b5-93dd-645802cb80df" containerID="fd51e568ef849e087197981178ae825bdad4891a533a349b60539eba0344d19b" exitCode=0 Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.158023 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerDied","Data":"fd51e568ef849e087197981178ae825bdad4891a533a349b60539eba0344d19b"} Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.266800 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.266922 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.299833 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.746972 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"] Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.754249 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.758247 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-jslr7" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.758334 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.758495 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.773128 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.784786 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"] Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.808667 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.810140 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.812898 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.817292 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.863230 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ghk88\" (UniqueName: \"kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.863312 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.863343 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965056 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qglj8\" (UniqueName: \"kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965113 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" 
Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965272 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ghk88\" (UniqueName: \"kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965622 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965814 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.965862 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.966612 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:18 crc kubenswrapper[4693]: I1122 09:16:18.994541 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ghk88\" (UniqueName: \"kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88\") pod \"dnsmasq-dns-7bdd77c89-5nnlr\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.067194 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qglj8\" (UniqueName: \"kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.067264 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.067943 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.077868 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.081593 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qglj8\" (UniqueName: \"kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8\") pod \"dnsmasq-dns-6584b49599-bb8br\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.131892 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.175232 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerStarted","Data":"e40e1076b34d65f9eb4d04d3181626ea1fcc9309bdff82c2b8d10505d84a91c6"} Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.233181 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xpsxb" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.253338 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vgkkr" podStartSLOduration=1.778743097 podStartE2EDuration="4.253321357s" podCreationTimestamp="2025-11-22 09:16:15 +0000 UTC" firstStartedPulling="2025-11-22 09:16:16.147149314 +0000 UTC m=+772.289651605" lastFinishedPulling="2025-11-22 09:16:18.621727575 +0000 UTC m=+774.764229865" observedRunningTime="2025-11-22 09:16:19.198457524 +0000 UTC m=+775.340959815" watchObservedRunningTime="2025-11-22 09:16:19.253321357 +0000 UTC m=+775.395823648" Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.454615 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"] Nov 22 09:16:19 crc kubenswrapper[4693]: W1122 09:16:19.456375 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podebe43d74_2a02_49cc_93d4_d0f4f1cc3b1b.slice/crio-1bd0555aa16e41d201de03d98df9f2f3382161a48d3f26f825e2831fccebcee6 WatchSource:0}: Error finding container 1bd0555aa16e41d201de03d98df9f2f3382161a48d3f26f825e2831fccebcee6: Status 404 returned error can't find the container with id 1bd0555aa16e41d201de03d98df9f2f3382161a48d3f26f825e2831fccebcee6 Nov 22 09:16:19 crc kubenswrapper[4693]: I1122 09:16:19.528782 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:19 crc kubenswrapper[4693]: W1122 09:16:19.529511 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6b389e12_2848_4088_b36d_5943826ff156.slice/crio-a46263f53a450ceff2dc948634df1b4e5e827e44c0da85f85bbe9a1e631f049f WatchSource:0}: Error finding container a46263f53a450ceff2dc948634df1b4e5e827e44c0da85f85bbe9a1e631f049f: Status 404 returned error can't find the container with id a46263f53a450ceff2dc948634df1b4e5e827e44c0da85f85bbe9a1e631f049f Nov 22 09:16:20 crc kubenswrapper[4693]: I1122 09:16:20.179174 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-bb8br" event={"ID":"6b389e12-2848-4088-b36d-5943826ff156","Type":"ContainerStarted","Data":"a46263f53a450ceff2dc948634df1b4e5e827e44c0da85f85bbe9a1e631f049f"} Nov 22 09:16:20 crc 
kubenswrapper[4693]: I1122 09:16:20.180441 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" event={"ID":"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b","Type":"ContainerStarted","Data":"1bd0555aa16e41d201de03d98df9f2f3382161a48d3f26f825e2831fccebcee6"} Nov 22 09:16:20 crc kubenswrapper[4693]: I1122 09:16:20.546291 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xpsxb"] Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.875904 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.889344 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.890420 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.903441 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.940521 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nhplq\" (UniqueName: \"kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.940661 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:21 crc kubenswrapper[4693]: I1122 09:16:21.940728 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.042343 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.042419 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nhplq\" (UniqueName: \"kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.042476 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 
09:16:22.043199 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.043362 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.080527 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nhplq\" (UniqueName: \"kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq\") pod \"dnsmasq-dns-7c6d9948dc-nd8zf\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.130007 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"]
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.168476 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"]
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.169463 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.173672 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"]
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.204772 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xpsxb" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="registry-server" containerID="cri-o://fbf15b567c70e11c444ade9f4d7030302b2777ca992667a2a2b66745652b5b69" gracePeriod=2
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.215755 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.245474 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khdjb\" (UniqueName: \"kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.245533 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.245559 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.346811 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khdjb\" (UniqueName: \"kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.346872 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.346903 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.347693 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.347714 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.361155 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-khdjb\" (UniqueName: \"kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb\") pod \"dnsmasq-dns-6486446b9f-nlqz5\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:22 crc kubenswrapper[4693]: I1122 09:16:22.506050 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.024774 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.026031 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.028147 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.028375 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2qhj8"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.028536 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.028735 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.028950 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.030278 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.030786 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.037064 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054543 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054574 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054600 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054647 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054674 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054713 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dthks\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054752 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054777 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054792 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054891 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.054914 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.155764 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.155801 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.155825 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.155863 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.155882 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156084 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156130 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156215 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dthks\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156305 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156335 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.156351 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.157705 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.158441 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.158695 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.158743 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.158940 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.159304 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.162927 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.165524 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.166396 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.173879 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.176937 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dthks\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.177862 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.212873 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerID="fbf15b567c70e11c444ade9f4d7030302b2777ca992667a2a2b66745652b5b69" exitCode=0
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.212905 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerDied","Data":"fbf15b567c70e11c444ade9f4d7030302b2777ca992667a2a2b66745652b5b69"}
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.267851 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.269014 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.270477 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.272836 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.273166 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.273400 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.274421 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.274735 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hqxpl"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.275933 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.282066 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.349132 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460387 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460651 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460708 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460749 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460796 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.460816 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w6fxg\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.461160 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.461247 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.461383 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.461410 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.461435 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562452 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562507 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562529 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562551 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562567 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562588 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562622 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562644 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562663 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w6fxg\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562686 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.562716 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.564371 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.564594 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.564799 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.565099 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.565300 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.565697 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.569375 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.569756 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.569954 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.573391 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.579996 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w6fxg\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.580039 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:23 crc kubenswrapper[4693]: I1122 09:16:23.593024 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.667113 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.671725 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.674527 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-c5lmf"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.675166 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.675711 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.675861 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.675898 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.688315 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776111 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776149 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776186 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776306 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776349 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tll4f\" (UniqueName: \"kubernetes.io/projected/1d64e5e3-8006-4833-a05d-705799403cc2-kube-api-access-tll4f\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776460 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776479 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.776557 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877356 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877579 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877605 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877637 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877671 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877691 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tll4f\" (UniqueName: \"kubernetes.io/projected/1d64e5e3-8006-4833-a05d-705799403cc2-kube-api-access-tll4f\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877728 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.877744 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.878358 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-kolla-config\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.878564 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.880821 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.881822 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.884522 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.885026 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1d64e5e3-8006-4833-a05d-705799403cc2-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.886317 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1d64e5e3-8006-4833-a05d-705799403cc2-config-data-default\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.899288 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.904040 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tll4f\" (UniqueName: \"kubernetes.io/projected/1d64e5e3-8006-4833-a05d-705799403cc2-kube-api-access-tll4f\") pod \"openstack-galera-0\" (UID: \"1d64e5e3-8006-4833-a05d-705799403cc2\") " pod="openstack/openstack-galera-0"
Nov 22 09:16:24 crc kubenswrapper[4693]: I1122 09:16:24.999686 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Nov 22 09:16:25 crc kubenswrapper[4693]: I1122 09:16:25.463798 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vgkkr"
Nov 22 09:16:25 crc kubenswrapper[4693]: I1122 09:16:25.463893 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vgkkr"
Nov 22 09:16:25 crc kubenswrapper[4693]: I1122 09:16:25.499857 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vgkkr"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.132329 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.133645 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.136166 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.136311 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.136726 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.139116 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-wmgc4"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.143523 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.269416 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vgkkr"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.300862 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-556ms\" (UniqueName: \"kubernetes.io/projected/957b6aef-f771-4011-b712-e53794ad836a-kube-api-access-556ms\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301286 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301312 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301349 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301392 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301411 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301504 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/957b6aef-f771-4011-b712-e53794ad836a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.301520 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.402933 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.402976 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403018 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403070 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403085 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403185 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/957b6aef-f771-4011-b712-e53794ad836a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403204 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403246 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-556ms\" (UniqueName: \"kubernetes.io/projected/957b6aef-f771-4011-b712-e53794ad836a-kube-api-access-556ms\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.403635 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.404507 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/957b6aef-f771-4011-b712-e53794ad836a-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.404665 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.404893 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.405568 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/957b6aef-f771-4011-b712-e53794ad836a-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.411349 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.411911 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/957b6aef-f771-4011-b712-e53794ad836a-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.416778 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-556ms\" (UniqueName: \"kubernetes.io/projected/957b6aef-f771-4011-b712-e53794ad836a-kube-api-access-556ms\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.421316 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"openstack-cell1-galera-0\" (UID: \"957b6aef-f771-4011-b712-e53794ad836a\") " pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.461151 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.613251 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.614375 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.616148 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-4zv4n"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.616985 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.617162 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.620462 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.708039 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.708100 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kolla-config\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.708146 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.708198 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzm8p\" (UniqueName: \"kubernetes.io/projected/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kube-api-access-tzm8p\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.708259 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-config-data\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.809893 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzm8p\" (UniqueName: \"kubernetes.io/projected/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kube-api-access-tzm8p\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.809950 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-config-data\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.809989 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.810016 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kolla-config\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.810044 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.810796 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kolla-config\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.810882 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/cbaee87f-e8c6-4e56-9b59-c0f50054c172-config-data\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.813169 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-memcached-tls-certs\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.813190 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbaee87f-e8c6-4e56-9b59-c0f50054c172-combined-ca-bundle\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.823910 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzm8p\" (UniqueName: \"kubernetes.io/projected/cbaee87f-e8c6-4e56-9b59-c0f50054c172-kube-api-access-tzm8p\") pod \"memcached-0\" (UID: \"cbaee87f-e8c6-4e56-9b59-c0f50054c172\") " pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.929004 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Nov 22 09:16:26 crc kubenswrapper[4693]: I1122 09:16:26.944585 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"]
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.169529 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xpsxb"
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.246278 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xpsxb" event={"ID":"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d","Type":"ContainerDied","Data":"cab9818f94f63409de313f627a5db42a563287eefa29fc93fda1d550d68e944a"}
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.246354 4693 scope.go:117] "RemoveContainer" containerID="fbf15b567c70e11c444ade9f4d7030302b2777ca992667a2a2b66745652b5b69"
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.247154 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xpsxb"
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.315683 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ddkmp\" (UniqueName: \"kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp\") pod \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") "
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.315953 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities\") pod \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") "
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.315991 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content\") pod \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\" (UID: \"8cd57715-bdd6-4b4c-bae4-acc7c6d7070d\") "
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.316567 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities" (OuterVolumeSpecName: "utilities") pod "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" (UID: "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.320930 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp" (OuterVolumeSpecName: "kube-api-access-ddkmp") pod "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" (UID: "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d"). InnerVolumeSpecName "kube-api-access-ddkmp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.372198 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" (UID: "8cd57715-bdd6-4b4c-bae4-acc7c6d7070d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.417654 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-utilities\") on node \"crc\" DevicePath \"\""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.417690 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-catalog-content\") on node \"crc\" DevicePath \"\""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.417701 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ddkmp\" (UniqueName: \"kubernetes.io/projected/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d-kube-api-access-ddkmp\") on node \"crc\" DevicePath \"\""
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.572130 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xpsxb"]
Nov 22 09:16:27 crc kubenswrapper[4693]: I1122 09:16:27.576305 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xpsxb"]
Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.157025 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" path="/var/lib/kubelet/pods/8cd57715-bdd6-4b4c-bae4-acc7c6d7070d/volumes"
Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.218376 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Nov 22 09:16:28 crc kubenswrapper[4693]: E1122 09:16:28.218682 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="registry-server"
Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.218697 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="registry-server"
Nov 22 09:16:28 crc kubenswrapper[4693]: E1122 09:16:28.218710 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="extract-content"
Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.218716 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="extract-content"
Nov 22 09:16:28 crc kubenswrapper[4693]: E1122 09:16:28.218732 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="extract-utilities"
Nov 22 09:16:28 crc
kubenswrapper[4693]: I1122 09:16:28.218737 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="extract-utilities" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.218929 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd57715-bdd6-4b4c-bae4-acc7c6d7070d" containerName="registry-server" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.219436 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.222041 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-c2sd8" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.236647 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.254299 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vgkkr" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="registry-server" containerID="cri-o://e40e1076b34d65f9eb4d04d3181626ea1fcc9309bdff82c2b8d10505d84a91c6" gracePeriod=2 Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.329820 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lt9zc\" (UniqueName: \"kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc\") pod \"kube-state-metrics-0\" (UID: \"5f7105e9-640f-4369-970a-df4123cce84a\") " pod="openstack/kube-state-metrics-0" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.432288 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lt9zc\" (UniqueName: \"kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc\") pod \"kube-state-metrics-0\" (UID: \"5f7105e9-640f-4369-970a-df4123cce84a\") " pod="openstack/kube-state-metrics-0" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.445661 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lt9zc\" (UniqueName: \"kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc\") pod \"kube-state-metrics-0\" (UID: \"5f7105e9-640f-4369-970a-df4123cce84a\") " pod="openstack/kube-state-metrics-0" Nov 22 09:16:28 crc kubenswrapper[4693]: I1122 09:16:28.536977 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:16:29 crc kubenswrapper[4693]: I1122 09:16:29.262271 4693 generic.go:334] "Generic (PLEG): container finished" podID="d07889e1-fb32-44b5-93dd-645802cb80df" containerID="e40e1076b34d65f9eb4d04d3181626ea1fcc9309bdff82c2b8d10505d84a91c6" exitCode=0 Nov 22 09:16:29 crc kubenswrapper[4693]: I1122 09:16:29.262312 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerDied","Data":"e40e1076b34d65f9eb4d04d3181626ea1fcc9309bdff82c2b8d10505d84a91c6"} Nov 22 09:16:31 crc kubenswrapper[4693]: I1122 09:16:31.162096 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.200469 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-czsjw"] Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.201635 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.203421 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-nnk9v" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.203524 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.204069 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.204364 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-xk5h2"] Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.205668 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.210313 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-czsjw"] Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.218731 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xk5h2"] Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386388 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-whqdz\" (UniqueName: \"kubernetes.io/projected/6ebf2de1-2769-47bc-8136-4ff7460b89b1-kube-api-access-whqdz\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386433 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-log\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386455 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-etc-ovs\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386485 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-run\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386509 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ebf2de1-2769-47bc-8136-4ff7460b89b1-scripts\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386526 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rrql9\" (UniqueName: \"kubernetes.io/projected/da762aee-c526-4bbb-a724-9135350b6528-kube-api-access-rrql9\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386540 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-ovn-controller-tls-certs\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386559 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-combined-ca-bundle\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 
09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386598 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-lib\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386616 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386630 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da762aee-c526-4bbb-a724-9135350b6528-scripts\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386643 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-log-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.386660 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.488503 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-lib\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.488708 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da762aee-c526-4bbb-a724-9135350b6528-scripts\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.488785 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.488871 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-log-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.488946 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489045 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-whqdz\" (UniqueName: \"kubernetes.io/projected/6ebf2de1-2769-47bc-8136-4ff7460b89b1-kube-api-access-whqdz\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489111 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-log\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489153 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-log-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489071 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-lib\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489228 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run-ovn\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489174 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-etc-ovs\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489339 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/6ebf2de1-2769-47bc-8136-4ff7460b89b1-var-run\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489409 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-etc-ovs\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489297 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-log\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489472 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-run\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489522 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/da762aee-c526-4bbb-a724-9135350b6528-var-run\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489600 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ebf2de1-2769-47bc-8136-4ff7460b89b1-scripts\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489672 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rrql9\" (UniqueName: \"kubernetes.io/projected/da762aee-c526-4bbb-a724-9135350b6528-kube-api-access-rrql9\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489738 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-ovn-controller-tls-certs\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.489810 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-combined-ca-bundle\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.491062 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/da762aee-c526-4bbb-a724-9135350b6528-scripts\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.491518 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6ebf2de1-2769-47bc-8136-4ff7460b89b1-scripts\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.495388 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-combined-ca-bundle\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.495959 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/6ebf2de1-2769-47bc-8136-4ff7460b89b1-ovn-controller-tls-certs\") pod \"ovn-controller-czsjw\" (UID: 
\"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.502040 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-whqdz\" (UniqueName: \"kubernetes.io/projected/6ebf2de1-2769-47bc-8136-4ff7460b89b1-kube-api-access-whqdz\") pod \"ovn-controller-czsjw\" (UID: \"6ebf2de1-2769-47bc-8136-4ff7460b89b1\") " pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.503057 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rrql9\" (UniqueName: \"kubernetes.io/projected/da762aee-c526-4bbb-a724-9135350b6528-kube-api-access-rrql9\") pod \"ovn-controller-ovs-xk5h2\" (UID: \"da762aee-c526-4bbb-a724-9135350b6528\") " pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.518247 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-czsjw" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.523123 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.790717 4693 scope.go:117] "RemoveContainer" containerID="a475d84bc2fb900b2bc4568451d56f2a3f549712e667a0d2f59f756ff5f16273" Nov 22 09:16:32 crc kubenswrapper[4693]: W1122 09:16:32.795784 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc687d537_2713_42cf_9f20_ef640bbd6c3c.slice/crio-a447cb3116dc44bba0590fe10860acd5e9ef50a863af88e970731f931244d8f1 WatchSource:0}: Error finding container a447cb3116dc44bba0590fe10860acd5e9ef50a863af88e970731f931244d8f1: Status 404 returned error can't find the container with id a447cb3116dc44bba0590fe10860acd5e9ef50a863af88e970731f931244d8f1 Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.838493 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.899736 4693 scope.go:117] "RemoveContainer" containerID="78fce862412920005a3b97f5e652bd271488a02c080fbb919f18e202987683a2" Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.997151 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9\") pod \"d07889e1-fb32-44b5-93dd-645802cb80df\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.997246 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities\") pod \"d07889e1-fb32-44b5-93dd-645802cb80df\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.997329 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content\") pod \"d07889e1-fb32-44b5-93dd-645802cb80df\" (UID: \"d07889e1-fb32-44b5-93dd-645802cb80df\") " Nov 22 09:16:32 crc kubenswrapper[4693]: I1122 09:16:32.998624 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities" (OuterVolumeSpecName: "utilities") pod "d07889e1-fb32-44b5-93dd-645802cb80df" (UID: "d07889e1-fb32-44b5-93dd-645802cb80df"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.003176 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9" (OuterVolumeSpecName: "kube-api-access-t9zs9") pod "d07889e1-fb32-44b5-93dd-645802cb80df" (UID: "d07889e1-fb32-44b5-93dd-645802cb80df"). InnerVolumeSpecName "kube-api-access-t9zs9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.011546 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d07889e1-fb32-44b5-93dd-645802cb80df" (UID: "d07889e1-fb32-44b5-93dd-645802cb80df"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.098337 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.098363 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d07889e1-fb32-44b5-93dd-645802cb80df-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.098373 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/d07889e1-fb32-44b5-93dd-645802cb80df-kube-api-access-t9zs9\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.297112 4693 generic.go:334] "Generic (PLEG): container finished" podID="6b389e12-2848-4088-b36d-5943826ff156" containerID="70185555139a646761d5969b440967952ccf4e1c8297d7844c0eaf7107f6d3f7" exitCode=0 Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.297399 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-bb8br" event={"ID":"6b389e12-2848-4088-b36d-5943826ff156","Type":"ContainerDied","Data":"70185555139a646761d5969b440967952ccf4e1c8297d7844c0eaf7107f6d3f7"} Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.314774 4693 generic.go:334] "Generic (PLEG): container finished" podID="ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" containerID="6ad20820c6d4b38a533f75ec3fedc6ba27da7748d2148cd87665265fa3988766" exitCode=0 Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.314965 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" event={"ID":"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b","Type":"ContainerDied","Data":"6ad20820c6d4b38a533f75ec3fedc6ba27da7748d2148cd87665265fa3988766"} Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.324819 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerStarted","Data":"a447cb3116dc44bba0590fe10860acd5e9ef50a863af88e970731f931244d8f1"} Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.334419 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.342757 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.345893 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vgkkr" event={"ID":"d07889e1-fb32-44b5-93dd-645802cb80df","Type":"ContainerDied","Data":"50cd6af7dd668dd2fb5a3ce2e00c80c7fd771c753e1243ab22862a3d8fcbcbea"} Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.345931 4693 scope.go:117] "RemoveContainer" containerID="e40e1076b34d65f9eb4d04d3181626ea1fcc9309bdff82c2b8d10505d84a91c6" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.346010 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vgkkr" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.352497 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:16:33 crc kubenswrapper[4693]: W1122 09:16:33.354893 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod917b4902_7f7c_4344_a0dc_26262efb2c82.slice/crio-bf48326ec2dd9c2b8a1917b71dd197289bd16cd8947e320a13c7a9f9e56d94b3 WatchSource:0}: Error finding container bf48326ec2dd9c2b8a1917b71dd197289bd16cd8947e320a13c7a9f9e56d94b3: Status 404 returned error can't find the container with id bf48326ec2dd9c2b8a1917b71dd197289bd16cd8947e320a13c7a9f9e56d94b3 Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.357658 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.361182 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.374270 4693 scope.go:117] "RemoveContainer" containerID="fd51e568ef849e087197981178ae825bdad4891a533a349b60539eba0344d19b" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.379002 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.383092 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vgkkr"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.405649 4693 scope.go:117] "RemoveContainer" containerID="f43b91cf9c2a421408f5c754a4c09c88fed459adb73e610b4b294078e22c6395" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.506790 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:16:33 crc kubenswrapper[4693]: W1122 09:16:33.521005 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f7105e9_640f_4369_970a_df4123cce84a.slice/crio-425088be5d5984b7711cc4a69d6c0766c320a61538029070e62bb826a2547dd7 WatchSource:0}: Error finding container 425088be5d5984b7711cc4a69d6c0766c320a61538029070e62bb826a2547dd7: Status 404 returned error can't find the container with id 425088be5d5984b7711cc4a69d6c0766c320a61538029070e62bb826a2547dd7 Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.523936 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Nov 22 09:16:33 crc kubenswrapper[4693]: W1122 09:16:33.524025 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbaee87f_e8c6_4e56_9b59_c0f50054c172.slice/crio-e7f83262bb88a23bcd38dc49abb09f41acfc09f38f83e22455b83f21bf7255ae WatchSource:0}: Error finding container e7f83262bb88a23bcd38dc49abb09f41acfc09f38f83e22455b83f21bf7255ae: Status 404 returned error can't find the container with id e7f83262bb88a23bcd38dc49abb09f41acfc09f38f83e22455b83f21bf7255ae Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.606975 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-czsjw"] Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.729650 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-xk5h2"] Nov 22 09:16:33 crc kubenswrapper[4693]: W1122 
09:16:33.806345 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podda762aee_c526_4bbb_a724_9135350b6528.slice/crio-a1bc232cfc0aa032442d8e9bbcefeee46de96d54205472550d840a388eb9359b WatchSource:0}: Error finding container a1bc232cfc0aa032442d8e9bbcefeee46de96d54205472550d840a388eb9359b: Status 404 returned error can't find the container with id a1bc232cfc0aa032442d8e9bbcefeee46de96d54205472550d840a388eb9359b Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.809123 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.813366 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.907855 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc\") pod \"6b389e12-2848-4088-b36d-5943826ff156\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.908003 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ghk88\" (UniqueName: \"kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88\") pod \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.908035 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qglj8\" (UniqueName: \"kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8\") pod \"6b389e12-2848-4088-b36d-5943826ff156\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.908069 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config\") pod \"6b389e12-2848-4088-b36d-5943826ff156\" (UID: \"6b389e12-2848-4088-b36d-5943826ff156\") " Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.908121 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config\") pod \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\" (UID: \"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b\") " Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.913283 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88" (OuterVolumeSpecName: "kube-api-access-ghk88") pod "ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" (UID: "ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b"). InnerVolumeSpecName "kube-api-access-ghk88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.913597 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8" (OuterVolumeSpecName: "kube-api-access-qglj8") pod "6b389e12-2848-4088-b36d-5943826ff156" (UID: "6b389e12-2848-4088-b36d-5943826ff156"). InnerVolumeSpecName "kube-api-access-qglj8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.924713 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config" (OuterVolumeSpecName: "config") pod "ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" (UID: "ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.926822 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6b389e12-2848-4088-b36d-5943826ff156" (UID: "6b389e12-2848-4088-b36d-5943826ff156"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.928651 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config" (OuterVolumeSpecName: "config") pod "6b389e12-2848-4088-b36d-5943826ff156" (UID: "6b389e12-2848-4088-b36d-5943826ff156"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.975983 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-h8klh"] Nov 22 09:16:33 crc kubenswrapper[4693]: E1122 09:16:33.976272 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="extract-content" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976288 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="extract-content" Nov 22 09:16:33 crc kubenswrapper[4693]: E1122 09:16:33.976303 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="registry-server" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976310 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="registry-server" Nov 22 09:16:33 crc kubenswrapper[4693]: E1122 09:16:33.976330 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="extract-utilities" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976335 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="extract-utilities" Nov 22 09:16:33 crc kubenswrapper[4693]: E1122 09:16:33.976343 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b389e12-2848-4088-b36d-5943826ff156" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976348 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b389e12-2848-4088-b36d-5943826ff156" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: E1122 09:16:33.976359 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976364 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976487 4693 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6b389e12-2848-4088-b36d-5943826ff156" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976500 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" containerName="init" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976509 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" containerName="registry-server" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.976969 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.982902 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.983312 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Nov 22 09:16:33 crc kubenswrapper[4693]: I1122 09:16:33.988422 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h8klh"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.009878 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.009904 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.009914 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ghk88\" (UniqueName: \"kubernetes.io/projected/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b-kube-api-access-ghk88\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.009925 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qglj8\" (UniqueName: \"kubernetes.io/projected/6b389e12-2848-4088-b36d-5943826ff156-kube-api-access-qglj8\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.009934 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6b389e12-2848-4088-b36d-5943826ff156-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111333 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-combined-ca-bundle\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111377 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111525 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: 
\"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovs-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111567 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-config\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111604 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxm2p\" (UniqueName: \"kubernetes.io/projected/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-kube-api-access-jxm2p\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.111662 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovn-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.162302 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d07889e1-fb32-44b5-93dd-645802cb80df" path="/var/lib/kubelet/pods/d07889e1-fb32-44b5-93dd-645802cb80df/volumes" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212738 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovs-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212785 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-config\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212824 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxm2p\" (UniqueName: \"kubernetes.io/projected/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-kube-api-access-jxm2p\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212874 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovn-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212911 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-combined-ca-bundle\") pod \"ovn-controller-metrics-h8klh\" (UID: 
\"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.212929 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.213559 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovs-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.213628 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-ovn-rundir\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.214165 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-config\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.218471 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.219092 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-combined-ca-bundle\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.231918 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxm2p\" (UniqueName: \"kubernetes.io/projected/9f9e4335-a9aa-4a2c-8300-25680a90ab8a-kube-api-access-jxm2p\") pod \"ovn-controller-metrics-h8klh\" (UID: \"9f9e4335-a9aa-4a2c-8300-25680a90ab8a\") " pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.296591 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-h8klh" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.359102 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xk5h2" event={"ID":"da762aee-c526-4bbb-a724-9135350b6528","Type":"ContainerStarted","Data":"a1bc232cfc0aa032442d8e9bbcefeee46de96d54205472550d840a388eb9359b"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.360137 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"957b6aef-f771-4011-b712-e53794ad836a","Type":"ContainerStarted","Data":"9e3401efa90808e9483f26ce50d29e76826ba2429cd76acb697634f0c948fe93"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.361390 4693 generic.go:334] "Generic (PLEG): container finished" podID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerID="85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af" exitCode=0 Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.361443 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" event={"ID":"21e862c1-def8-4f98-ae0a-8fdd19e37e32","Type":"ContainerDied","Data":"85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.361460 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" event={"ID":"21e862c1-def8-4f98-ae0a-8fdd19e37e32","Type":"ContainerStarted","Data":"b3a23e93f8fee5ab526fc5c3e82818e9d2d6908ebd946227b397087083a2542c"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.362600 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6584b49599-bb8br" event={"ID":"6b389e12-2848-4088-b36d-5943826ff156","Type":"ContainerDied","Data":"a46263f53a450ceff2dc948634df1b4e5e827e44c0da85f85bbe9a1e631f049f"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.362639 4693 scope.go:117] "RemoveContainer" containerID="70185555139a646761d5969b440967952ccf4e1c8297d7844c0eaf7107f6d3f7" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.362608 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6584b49599-bb8br" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.365596 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" event={"ID":"ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b","Type":"ContainerDied","Data":"1bd0555aa16e41d201de03d98df9f2f3382161a48d3f26f825e2831fccebcee6"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.365613 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7bdd77c89-5nnlr" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.367817 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-czsjw" event={"ID":"6ebf2de1-2769-47bc-8136-4ff7460b89b1","Type":"ContainerStarted","Data":"4ea08b986e79a285c09958963c6ff9f19e9a3a6b68dd3ec79a71fc4b6f6ebca6"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.370189 4693 generic.go:334] "Generic (PLEG): container finished" podID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerID="ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84" exitCode=0 Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.370249 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" event={"ID":"917b4902-7f7c-4344-a0dc-26262efb2c82","Type":"ContainerDied","Data":"ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.370267 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" event={"ID":"917b4902-7f7c-4344-a0dc-26262efb2c82","Type":"ContainerStarted","Data":"bf48326ec2dd9c2b8a1917b71dd197289bd16cd8947e320a13c7a9f9e56d94b3"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.372556 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerStarted","Data":"2db6bd12d446cd26dc981f1920e57e3d5c5c972f118793e0f0c4b2d55d3482e3"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.375364 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cbaee87f-e8c6-4e56-9b59-c0f50054c172","Type":"ContainerStarted","Data":"e7f83262bb88a23bcd38dc49abb09f41acfc09f38f83e22455b83f21bf7255ae"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.376630 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f7105e9-640f-4369-970a-df4123cce84a","Type":"ContainerStarted","Data":"425088be5d5984b7711cc4a69d6c0766c320a61538029070e62bb826a2547dd7"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.377679 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d64e5e3-8006-4833-a05d-705799403cc2","Type":"ContainerStarted","Data":"d29b57a5be56deb289237cb68bac8baccc33e9684a2a4da0689fb6770a2e4c17"} Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.411364 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.415114 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6584b49599-bb8br"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.443469 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.449126 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7bdd77c89-5nnlr"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.471415 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.472693 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.479584 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-76tkf" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.479775 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.480048 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.481438 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.490983 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.597204 4693 scope.go:117] "RemoveContainer" containerID="6ad20820c6d4b38a533f75ec3fedc6ba27da7748d2148cd87665265fa3988766" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618150 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618262 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-27jxl\" (UniqueName: \"kubernetes.io/projected/db7c316e-a7ee-4c1a-a663-b02279df3b3e-kube-api-access-27jxl\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618302 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618401 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618435 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618477 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618504 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-config\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.618525 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.719792 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.719856 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.719902 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.719923 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-config\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.719941 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.720087 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.720169 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-27jxl\" (UniqueName: \"kubernetes.io/projected/db7c316e-a7ee-4c1a-a663-b02279df3b3e-kube-api-access-27jxl\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.720200 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: 
I1122 09:16:34.720307 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.720345 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.721691 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-config\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.721720 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/db7c316e-a7ee-4c1a-a663-b02279df3b3e-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.734001 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.735748 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.735811 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/db7c316e-a7ee-4c1a-a663-b02279df3b3e-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.736692 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-27jxl\" (UniqueName: \"kubernetes.io/projected/db7c316e-a7ee-4c1a-a663-b02279df3b3e-kube-api-access-27jxl\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.743470 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"ovsdbserver-sb-0\" (UID: \"db7c316e-a7ee-4c1a-a663-b02279df3b3e\") " pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:34 crc kubenswrapper[4693]: I1122 09:16:34.794127 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.108151 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.112260 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.114610 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-4l56m" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.114769 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.114958 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.115067 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.121632 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.231522 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.231936 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.231973 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.232005 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.232029 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.232055 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-config\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: 
I1122 09:16:35.232097 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.232122 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mn86h\" (UniqueName: \"kubernetes.io/projected/645f7714-f41d-4ece-85ef-8043bc2ca51d-kube-api-access-mn86h\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.333966 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334014 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334059 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334085 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334106 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-config\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334144 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334163 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mn86h\" (UniqueName: \"kubernetes.io/projected/645f7714-f41d-4ece-85ef-8043bc2ca51d-kube-api-access-mn86h\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334191 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-metrics-certs-tls-certs\") pod 
\"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.334273 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") device mount path \"/mnt/openstack/pv10\"" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.335956 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.338625 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.339326 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/645f7714-f41d-4ece-85ef-8043bc2ca51d-config\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.339480 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.340771 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.341086 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/645f7714-f41d-4ece-85ef-8043bc2ca51d-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.349549 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mn86h\" (UniqueName: \"kubernetes.io/projected/645f7714-f41d-4ece-85ef-8043bc2ca51d-kube-api-access-mn86h\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.352737 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage10-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage10-crc\") pod \"ovsdbserver-nb-0\" (UID: \"645f7714-f41d-4ece-85ef-8043bc2ca51d\") " pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.396522 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-h8klh"] Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 
09:16:35.430087 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:35 crc kubenswrapper[4693]: I1122 09:16:35.679947 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Nov 22 09:16:35 crc kubenswrapper[4693]: W1122 09:16:35.810673 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddb7c316e_a7ee_4c1a_a663_b02279df3b3e.slice/crio-fe08d470601b59edc49135bf3e68a4d7b2461df6a01a10a5adaf9d66b342aa41 WatchSource:0}: Error finding container fe08d470601b59edc49135bf3e68a4d7b2461df6a01a10a5adaf9d66b342aa41: Status 404 returned error can't find the container with id fe08d470601b59edc49135bf3e68a4d7b2461df6a01a10a5adaf9d66b342aa41 Nov 22 09:16:36 crc kubenswrapper[4693]: I1122 09:16:36.154355 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b389e12-2848-4088-b36d-5943826ff156" path="/var/lib/kubelet/pods/6b389e12-2848-4088-b36d-5943826ff156/volumes" Nov 22 09:16:36 crc kubenswrapper[4693]: I1122 09:16:36.154971 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b" path="/var/lib/kubelet/pods/ebe43d74-2a02-49cc-93d4-d0f4f1cc3b1b/volumes" Nov 22 09:16:36 crc kubenswrapper[4693]: I1122 09:16:36.418746 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"db7c316e-a7ee-4c1a-a663-b02279df3b3e","Type":"ContainerStarted","Data":"fe08d470601b59edc49135bf3e68a4d7b2461df6a01a10a5adaf9d66b342aa41"} Nov 22 09:16:36 crc kubenswrapper[4693]: I1122 09:16:36.420085 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8klh" event={"ID":"9f9e4335-a9aa-4a2c-8300-25680a90ab8a","Type":"ContainerStarted","Data":"5c15cd685e2243a1cc6f85151f37451e364894d28439983bc8ec12f856430a3c"} Nov 22 09:16:39 crc kubenswrapper[4693]: I1122 09:16:39.436981 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" event={"ID":"21e862c1-def8-4f98-ae0a-8fdd19e37e32","Type":"ContainerStarted","Data":"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4"} Nov 22 09:16:39 crc kubenswrapper[4693]: I1122 09:16:39.438086 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" Nov 22 09:16:39 crc kubenswrapper[4693]: I1122 09:16:39.454514 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" podStartSLOduration=17.454502862 podStartE2EDuration="17.454502862s" podCreationTimestamp="2025-11-22 09:16:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:16:39.448860968 +0000 UTC m=+795.591363259" watchObservedRunningTime="2025-11-22 09:16:39.454502862 +0000 UTC m=+795.597005154" Nov 22 09:16:39 crc kubenswrapper[4693]: I1122 09:16:39.492744 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Nov 22 09:16:40 crc kubenswrapper[4693]: W1122 09:16:40.447098 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod645f7714_f41d_4ece_85ef_8043bc2ca51d.slice/crio-c7ba80e43f80e2238d6adac83303cf661f3e4afef5422eb94da5788a598325f2 WatchSource:0}: Error finding container 
c7ba80e43f80e2238d6adac83303cf661f3e4afef5422eb94da5788a598325f2: Status 404 returned error can't find the container with id c7ba80e43f80e2238d6adac83303cf661f3e4afef5422eb94da5788a598325f2 Nov 22 09:16:41 crc kubenswrapper[4693]: I1122 09:16:41.458822 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" event={"ID":"917b4902-7f7c-4344-a0dc-26262efb2c82","Type":"ContainerStarted","Data":"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032"} Nov 22 09:16:41 crc kubenswrapper[4693]: I1122 09:16:41.459098 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:41 crc kubenswrapper[4693]: I1122 09:16:41.461195 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"645f7714-f41d-4ece-85ef-8043bc2ca51d","Type":"ContainerStarted","Data":"c7ba80e43f80e2238d6adac83303cf661f3e4afef5422eb94da5788a598325f2"} Nov 22 09:16:41 crc kubenswrapper[4693]: I1122 09:16:41.475784 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" podStartSLOduration=20.475770171 podStartE2EDuration="20.475770171s" podCreationTimestamp="2025-11-22 09:16:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:16:41.470799287 +0000 UTC m=+797.613301579" watchObservedRunningTime="2025-11-22 09:16:41.475770171 +0000 UTC m=+797.618272461" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.472177 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f7105e9-640f-4369-970a-df4123cce84a","Type":"ContainerStarted","Data":"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.472592 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.473581 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xk5h2" event={"ID":"da762aee-c526-4bbb-a724-9135350b6528","Type":"ContainerStarted","Data":"e94c764de99f9e223e2fc5ac0e7235ce8dbf24c80461575f4cdfb4a50f07e0b0"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.475013 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"cbaee87f-e8c6-4e56-9b59-c0f50054c172","Type":"ContainerStarted","Data":"c1e358a7d6a3e5e8d8b3c80e734f80327be1b0dbd0a0e8bbaa3bec348a101211"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.475113 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.476238 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-czsjw" event={"ID":"6ebf2de1-2769-47bc-8136-4ff7460b89b1","Type":"ContainerStarted","Data":"e01d43654931b4533f99826a0e80c7c1fcecad6c5eb31a79f877d1db350b5b5f"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.476367 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-czsjw" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.477437 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-h8klh" 
event={"ID":"9f9e4335-a9aa-4a2c-8300-25680a90ab8a","Type":"ContainerStarted","Data":"c22ab40bb9ee73f9dc4d6f8dcc1cf157fd057c90a8211ee993b4147fde486052"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.478559 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d64e5e3-8006-4833-a05d-705799403cc2","Type":"ContainerStarted","Data":"28d3882443521ddbc1d9e3477dcee97830895dcea10383382f852f84464dbb1e"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.479671 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"957b6aef-f771-4011-b712-e53794ad836a","Type":"ContainerStarted","Data":"e91b47279f2d3609988cdb0867e0d9efff455f0cb4605f3edb4225f90765baa3"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.480861 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"645f7714-f41d-4ece-85ef-8043bc2ca51d","Type":"ContainerStarted","Data":"ee6dd53db777c5485d04fad945d6a405c674b0ec0f8ed7c59e4f3d980c829924"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.482180 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"db7c316e-a7ee-4c1a-a663-b02279df3b3e","Type":"ContainerStarted","Data":"2b61b93166e841e56a87eb2285af48cf4c622a06d31dc40a2d35211b607f7676"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.482222 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"db7c316e-a7ee-4c1a-a663-b02279df3b3e","Type":"ContainerStarted","Data":"f17abec0041dd164491747cce6c39926ec90a495dd435d9ff4438f88c0ec0d3f"} Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.488117 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=9.924521003 podStartE2EDuration="15.488108136s" podCreationTimestamp="2025-11-22 09:16:28 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.526608478 +0000 UTC m=+789.669110769" lastFinishedPulling="2025-11-22 09:16:39.090195612 +0000 UTC m=+795.232697902" observedRunningTime="2025-11-22 09:16:43.48374253 +0000 UTC m=+799.626244821" watchObservedRunningTime="2025-11-22 09:16:43.488108136 +0000 UTC m=+799.630610427" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.521204 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-h8klh" podStartSLOduration=3.711605311 podStartE2EDuration="10.521190137s" podCreationTimestamp="2025-11-22 09:16:33 +0000 UTC" firstStartedPulling="2025-11-22 09:16:35.820318629 +0000 UTC m=+791.962820921" lastFinishedPulling="2025-11-22 09:16:42.629903456 +0000 UTC m=+798.772405747" observedRunningTime="2025-11-22 09:16:43.501377827 +0000 UTC m=+799.643880118" watchObservedRunningTime="2025-11-22 09:16:43.521190137 +0000 UTC m=+799.663692428" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.521431 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-czsjw" podStartSLOduration=2.520906849 podStartE2EDuration="11.521428084s" podCreationTimestamp="2025-11-22 09:16:32 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.620929354 +0000 UTC m=+789.763431644" lastFinishedPulling="2025-11-22 09:16:42.621450589 +0000 UTC m=+798.763952879" observedRunningTime="2025-11-22 09:16:43.520015328 +0000 UTC m=+799.662517619" watchObservedRunningTime="2025-11-22 09:16:43.521428084 +0000 UTC m=+799.663930375" Nov 22 09:16:43 crc 
kubenswrapper[4693]: I1122 09:16:43.577809 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=12.000248991 podStartE2EDuration="17.577793729s" podCreationTimestamp="2025-11-22 09:16:26 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.527942816 +0000 UTC m=+789.670445107" lastFinishedPulling="2025-11-22 09:16:39.105487555 +0000 UTC m=+795.247989845" observedRunningTime="2025-11-22 09:16:43.536116963 +0000 UTC m=+799.678619254" watchObservedRunningTime="2025-11-22 09:16:43.577793729 +0000 UTC m=+799.720296020" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.580837 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=3.798376393 podStartE2EDuration="10.580830225s" podCreationTimestamp="2025-11-22 09:16:33 +0000 UTC" firstStartedPulling="2025-11-22 09:16:35.819974563 +0000 UTC m=+791.962476854" lastFinishedPulling="2025-11-22 09:16:42.602428395 +0000 UTC m=+798.744930686" observedRunningTime="2025-11-22 09:16:43.573906692 +0000 UTC m=+799.716408983" watchObservedRunningTime="2025-11-22 09:16:43.580830225 +0000 UTC m=+799.723332507" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.794656 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.808684 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.808986 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="dnsmasq-dns" containerID="cri-o://1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032" gracePeriod=10 Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.828976 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.830114 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.833433 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.835479 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.971490 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.971729 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g96x\" (UniqueName: \"kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.971784 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:43 crc kubenswrapper[4693]: I1122 09:16:43.971818 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.073534 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.073592 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.073618 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.073666 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g96x\" (UniqueName: \"kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" 
Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.074582 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.074899 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.075137 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.075891 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.076047 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="dnsmasq-dns" containerID="cri-o://59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4" gracePeriod=10 Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.082152 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.097235 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.098279 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.098347 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g96x\" (UniqueName: \"kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x\") pod \"dnsmasq-dns-5848494dd9-4lrk6\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.100403 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.107502 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.175420 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.175556 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.175682 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kqh9s\" (UniqueName: \"kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.175805 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.175860 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.206269 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.242578 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.278545 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nhplq\" (UniqueName: \"kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq\") pod \"917b4902-7f7c-4344-a0dc-26262efb2c82\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.278622 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config\") pod \"917b4902-7f7c-4344-a0dc-26262efb2c82\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.278721 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc\") pod \"917b4902-7f7c-4344-a0dc-26262efb2c82\" (UID: \"917b4902-7f7c-4344-a0dc-26262efb2c82\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.279052 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kqh9s\" (UniqueName: \"kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.279125 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.279150 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.279232 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.279261 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.280292 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.281997 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.282264 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.282603 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq" (OuterVolumeSpecName: "kube-api-access-nhplq") pod "917b4902-7f7c-4344-a0dc-26262efb2c82" (UID: "917b4902-7f7c-4344-a0dc-26262efb2c82"). InnerVolumeSpecName "kube-api-access-nhplq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.289122 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.303203 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kqh9s\" (UniqueName: \"kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s\") pod \"dnsmasq-dns-5c7b6b5695-z9tqf\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.306942 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "917b4902-7f7c-4344-a0dc-26262efb2c82" (UID: "917b4902-7f7c-4344-a0dc-26262efb2c82"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.309984 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config" (OuterVolumeSpecName: "config") pod "917b4902-7f7c-4344-a0dc-26262efb2c82" (UID: "917b4902-7f7c-4344-a0dc-26262efb2c82"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.384729 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.384751 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nhplq\" (UniqueName: \"kubernetes.io/projected/917b4902-7f7c-4344-a0dc-26262efb2c82-kube-api-access-nhplq\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.384763 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/917b4902-7f7c-4344-a0dc-26262efb2c82-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.419150 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.491149 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerStarted","Data":"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.500266 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"645f7714-f41d-4ece-85ef-8043bc2ca51d","Type":"ContainerStarted","Data":"ba731135a31ea302fdae7f92d012764da67d673a57bf488a2025efc7924eada5"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.504304 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.505041 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerStarted","Data":"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.510214 4693 generic.go:334] "Generic (PLEG): container finished" podID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerID="1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032" exitCode=0 Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.510255 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.510308 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" event={"ID":"917b4902-7f7c-4344-a0dc-26262efb2c82","Type":"ContainerDied","Data":"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.510369 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7c6d9948dc-nd8zf" event={"ID":"917b4902-7f7c-4344-a0dc-26262efb2c82","Type":"ContainerDied","Data":"bf48326ec2dd9c2b8a1917b71dd197289bd16cd8947e320a13c7a9f9e56d94b3"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.510389 4693 scope.go:117] "RemoveContainer" containerID="1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.512399 4693 generic.go:334] "Generic (PLEG): container finished" podID="da762aee-c526-4bbb-a724-9135350b6528" containerID="e94c764de99f9e223e2fc5ac0e7235ce8dbf24c80461575f4cdfb4a50f07e0b0" exitCode=0 Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.512461 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xk5h2" event={"ID":"da762aee-c526-4bbb-a724-9135350b6528","Type":"ContainerDied","Data":"e94c764de99f9e223e2fc5ac0e7235ce8dbf24c80461575f4cdfb4a50f07e0b0"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.514254 4693 generic.go:334] "Generic (PLEG): container finished" podID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerID="59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4" exitCode=0 Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.514286 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.514330 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" event={"ID":"21e862c1-def8-4f98-ae0a-8fdd19e37e32","Type":"ContainerDied","Data":"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.514349 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6486446b9f-nlqz5" event={"ID":"21e862c1-def8-4f98-ae0a-8fdd19e37e32","Type":"ContainerDied","Data":"b3a23e93f8fee5ab526fc5c3e82818e9d2d6908ebd946227b397087083a2542c"} Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.518949 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=7.751414652 podStartE2EDuration="10.518939914s" podCreationTimestamp="2025-11-22 09:16:34 +0000 UTC" firstStartedPulling="2025-11-22 09:16:40.451805791 +0000 UTC m=+796.594308082" lastFinishedPulling="2025-11-22 09:16:43.219331053 +0000 UTC m=+799.361833344" observedRunningTime="2025-11-22 09:16:44.515462919 +0000 UTC m=+800.657965210" watchObservedRunningTime="2025-11-22 09:16:44.518939914 +0000 UTC m=+800.661442205" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.539978 4693 scope.go:117] "RemoveContainer" containerID="ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.572366 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.577053 4693 scope.go:117] "RemoveContainer" containerID="1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.577786 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7c6d9948dc-nd8zf"] Nov 22 09:16:44 crc kubenswrapper[4693]: E1122 09:16:44.579306 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032\": container with ID starting with 1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032 not found: ID does not exist" containerID="1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.579343 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032"} err="failed to get container status \"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032\": rpc error: code = NotFound desc = could not find container \"1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032\": container with ID starting with 1cd0c079feb1e16d724c342aa98709027f2279f57998a3b80e601c46a7e17032 not found: ID does not exist" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.579374 4693 scope.go:117] "RemoveContainer" containerID="ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84" Nov 22 09:16:44 crc kubenswrapper[4693]: E1122 09:16:44.579629 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84\": container with ID starting with 
ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84 not found: ID does not exist" containerID="ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.579653 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84"} err="failed to get container status \"ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84\": rpc error: code = NotFound desc = could not find container \"ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84\": container with ID starting with ef60d34106aa24004716fcbe8638a7f5abadc1e45fada198b416c17b2df58d84 not found: ID does not exist" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.579668 4693 scope.go:117] "RemoveContainer" containerID="59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.587815 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc\") pod \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.587883 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khdjb\" (UniqueName: \"kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb\") pod \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.587920 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config\") pod \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\" (UID: \"21e862c1-def8-4f98-ae0a-8fdd19e37e32\") " Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.591586 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb" (OuterVolumeSpecName: "kube-api-access-khdjb") pod "21e862c1-def8-4f98-ae0a-8fdd19e37e32" (UID: "21e862c1-def8-4f98-ae0a-8fdd19e37e32"). InnerVolumeSpecName "kube-api-access-khdjb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.618033 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config" (OuterVolumeSpecName: "config") pod "21e862c1-def8-4f98-ae0a-8fdd19e37e32" (UID: "21e862c1-def8-4f98-ae0a-8fdd19e37e32"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.619808 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "21e862c1-def8-4f98-ae0a-8fdd19e37e32" (UID: "21e862c1-def8-4f98-ae0a-8fdd19e37e32"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.641197 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.670188 4693 scope.go:117] "RemoveContainer" containerID="85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.689909 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.689935 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khdjb\" (UniqueName: \"kubernetes.io/projected/21e862c1-def8-4f98-ae0a-8fdd19e37e32-kube-api-access-khdjb\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.689947 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/21e862c1-def8-4f98-ae0a-8fdd19e37e32-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.711804 4693 scope.go:117] "RemoveContainer" containerID="59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4" Nov 22 09:16:44 crc kubenswrapper[4693]: E1122 09:16:44.712121 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4\": container with ID starting with 59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4 not found: ID does not exist" containerID="59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.712150 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4"} err="failed to get container status \"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4\": rpc error: code = NotFound desc = could not find container \"59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4\": container with ID starting with 59cbaa6af33d99b09f3d4ee7e3caff140aeb0994b688699ea8d0efe5c4c142d4 not found: ID does not exist" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.712168 4693 scope.go:117] "RemoveContainer" containerID="85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af" Nov 22 09:16:44 crc kubenswrapper[4693]: E1122 09:16:44.712457 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af\": container with ID starting with 85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af not found: ID does not exist" containerID="85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.712476 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af"} err="failed to get container status \"85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af\": rpc error: code = NotFound desc = could not find container \"85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af\": container with ID 
starting with 85d60c3f08ab6ce11b8ecea0cd4ee2cc135e5e953bd4615cc38cffb12f47b1af not found: ID does not exist" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.794203 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.869979 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.875367 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6486446b9f-nlqz5"] Nov 22 09:16:44 crc kubenswrapper[4693]: I1122 09:16:44.914226 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:16:44 crc kubenswrapper[4693]: W1122 09:16:44.921832 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8932da5d_07b8_41e0_a4a9_45dfc65fdd54.slice/crio-af0f157a52a672b482564f95e6ef7153700e73a31c0cd7405d9f583994c2accb WatchSource:0}: Error finding container af0f157a52a672b482564f95e6ef7153700e73a31c0cd7405d9f583994c2accb: Status 404 returned error can't find the container with id af0f157a52a672b482564f95e6ef7153700e73a31c0cd7405d9f583994c2accb Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.431171 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.521417 4693 generic.go:334] "Generic (PLEG): container finished" podID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerID="696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7" exitCode=0 Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.521509 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" event={"ID":"8932da5d-07b8-41e0-a4a9-45dfc65fdd54","Type":"ContainerDied","Data":"696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.521693 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" event={"ID":"8932da5d-07b8-41e0-a4a9-45dfc65fdd54","Type":"ContainerStarted","Data":"af0f157a52a672b482564f95e6ef7153700e73a31c0cd7405d9f583994c2accb"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.523676 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xk5h2" event={"ID":"da762aee-c526-4bbb-a724-9135350b6528","Type":"ContainerStarted","Data":"560e47c8e5ddade5128f64438b84bdcae4a9882d3b30a0cffb09eb7e6b4378f5"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.523705 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-xk5h2" event={"ID":"da762aee-c526-4bbb-a724-9135350b6528","Type":"ContainerStarted","Data":"fa6118c3f87e96e3119769a609452662f7e7411a49067e73915b3a0ef17cd40d"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.523754 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.523793 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.525867 4693 generic.go:334] "Generic (PLEG): container finished" podID="83d4df43-be89-47f3-9a70-c0e5c7751e60" 
containerID="3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7" exitCode=0 Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.525944 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" event={"ID":"83d4df43-be89-47f3-9a70-c0e5c7751e60","Type":"ContainerDied","Data":"3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.525969 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" event={"ID":"83d4df43-be89-47f3-9a70-c0e5c7751e60","Type":"ContainerStarted","Data":"9b78416874ed7406dd10b4e0155e724a3aad540ab26541dabd83cbd0fc3915f5"} Nov 22 09:16:45 crc kubenswrapper[4693]: I1122 09:16:45.550744 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-xk5h2" podStartSLOduration=4.759344028 podStartE2EDuration="13.550731106s" podCreationTimestamp="2025-11-22 09:16:32 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.808663287 +0000 UTC m=+789.951165578" lastFinishedPulling="2025-11-22 09:16:42.600050365 +0000 UTC m=+798.742552656" observedRunningTime="2025-11-22 09:16:45.547552672 +0000 UTC m=+801.690054963" watchObservedRunningTime="2025-11-22 09:16:45.550731106 +0000 UTC m=+801.693233397" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.153828 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" path="/var/lib/kubelet/pods/21e862c1-def8-4f98-ae0a-8fdd19e37e32/volumes" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.154395 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" path="/var/lib/kubelet/pods/917b4902-7f7c-4344-a0dc-26262efb2c82/volumes" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.535594 4693 generic.go:334] "Generic (PLEG): container finished" podID="1d64e5e3-8006-4833-a05d-705799403cc2" containerID="28d3882443521ddbc1d9e3477dcee97830895dcea10383382f852f84464dbb1e" exitCode=0 Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.535668 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d64e5e3-8006-4833-a05d-705799403cc2","Type":"ContainerDied","Data":"28d3882443521ddbc1d9e3477dcee97830895dcea10383382f852f84464dbb1e"} Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.537455 4693 generic.go:334] "Generic (PLEG): container finished" podID="957b6aef-f771-4011-b712-e53794ad836a" containerID="e91b47279f2d3609988cdb0867e0d9efff455f0cb4605f3edb4225f90765baa3" exitCode=0 Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.537537 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"957b6aef-f771-4011-b712-e53794ad836a","Type":"ContainerDied","Data":"e91b47279f2d3609988cdb0867e0d9efff455f0cb4605f3edb4225f90765baa3"} Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.539104 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" event={"ID":"83d4df43-be89-47f3-9a70-c0e5c7751e60","Type":"ContainerStarted","Data":"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3"} Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.539173 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.541058 4693 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" event={"ID":"8932da5d-07b8-41e0-a4a9-45dfc65fdd54","Type":"ContainerStarted","Data":"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d"} Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.541549 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.567992 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" podStartSLOduration=3.56797202 podStartE2EDuration="3.56797202s" podCreationTimestamp="2025-11-22 09:16:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:16:46.563254813 +0000 UTC m=+802.705757104" watchObservedRunningTime="2025-11-22 09:16:46.56797202 +0000 UTC m=+802.710474310" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.590496 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" podStartSLOduration=2.590482029 podStartE2EDuration="2.590482029s" podCreationTimestamp="2025-11-22 09:16:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:16:46.589894846 +0000 UTC m=+802.732397137" watchObservedRunningTime="2025-11-22 09:16:46.590482029 +0000 UTC m=+802.732984320" Nov 22 09:16:46 crc kubenswrapper[4693]: I1122 09:16:46.819585 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.431129 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.457640 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.547932 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1d64e5e3-8006-4833-a05d-705799403cc2","Type":"ContainerStarted","Data":"6d926d09831401c1ea0b37f313c79d2bd3c29b3ea434f10f3cad5c74334fdc4b"} Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.549337 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"957b6aef-f771-4011-b712-e53794ad836a","Type":"ContainerStarted","Data":"7a943bb044abb863583f23427e4dcab85101f844a5b6590d2855f51f68f28b05"} Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.563520 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=15.365612928000001 podStartE2EDuration="24.563508778s" podCreationTimestamp="2025-11-22 09:16:23 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.374461122 +0000 UTC m=+789.516963413" lastFinishedPulling="2025-11-22 09:16:42.572356972 +0000 UTC m=+798.714859263" observedRunningTime="2025-11-22 09:16:47.562289365 +0000 UTC m=+803.704791656" watchObservedRunningTime="2025-11-22 09:16:47.563508778 +0000 UTC m=+803.706011069" Nov 22 09:16:47 crc kubenswrapper[4693]: I1122 09:16:47.578942 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=13.378767161 podStartE2EDuration="22.578927498s" podCreationTimestamp="2025-11-22 
09:16:25 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.374067853 +0000 UTC m=+789.516570134" lastFinishedPulling="2025-11-22 09:16:42.574228179 +0000 UTC m=+798.716730471" observedRunningTime="2025-11-22 09:16:47.574342852 +0000 UTC m=+803.716845143" watchObservedRunningTime="2025-11-22 09:16:47.578927498 +0000 UTC m=+803.721429790" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.156808 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:16:48 crc kubenswrapper[4693]: E1122 09:16:48.157275 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="init" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157339 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="init" Nov 22 09:16:48 crc kubenswrapper[4693]: E1122 09:16:48.157397 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157450 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: E1122 09:16:48.157506 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157558 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: E1122 09:16:48.157616 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="init" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157667 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="init" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157834 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="917b4902-7f7c-4344-a0dc-26262efb2c82" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.157933 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="21e862c1-def8-4f98-ae0a-8fdd19e37e32" containerName="dnsmasq-dns" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.158962 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.164600 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.236026 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsmjv\" (UniqueName: \"kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.236084 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.236108 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.337766 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dsmjv\" (UniqueName: \"kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.338048 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.338130 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.338436 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.338478 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.352024 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-dsmjv\" (UniqueName: \"kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv\") pod \"redhat-operators-48z8c\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.471738 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.541553 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.630543 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Nov 22 09:16:48 crc kubenswrapper[4693]: I1122 09:16:48.875465 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.566378 4693 generic.go:334] "Generic (PLEG): container finished" podID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerID="63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5" exitCode=0 Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.566429 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerDied","Data":"63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5"} Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.566858 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerStarted","Data":"259d2fdcc6cccb9e26239b4bf54e90bc6702dc410bf835578f9d03938ed3a56b"} Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.568370 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.822534 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.955962 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.957067 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.958445 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.961383 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-h2m76" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.961523 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.964345 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Nov 22 09:16:49 crc kubenswrapper[4693]: I1122 09:16:49.966826 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.060891 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-config\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.060951 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-scripts\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.060970 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.060990 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.061086 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q89gr\" (UniqueName: \"kubernetes.io/projected/f7728d59-f897-43e3-a7d6-7d1704f41739-kube-api-access-q89gr\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.061125 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.061142 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: 
I1122 09:16:50.162944 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-scripts\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.162985 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163023 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163053 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q89gr\" (UniqueName: \"kubernetes.io/projected/f7728d59-f897-43e3-a7d6-7d1704f41739-kube-api-access-q89gr\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163071 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163084 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163235 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-config\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163764 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-scripts\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.163952 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f7728d59-f897-43e3-a7d6-7d1704f41739-config\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.164082 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.167935 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.168079 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.169098 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7728d59-f897-43e3-a7d6-7d1704f41739-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.178861 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q89gr\" (UniqueName: \"kubernetes.io/projected/f7728d59-f897-43e3-a7d6-7d1704f41739-kube-api-access-q89gr\") pod \"ovn-northd-0\" (UID: \"f7728d59-f897-43e3-a7d6-7d1704f41739\") " pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.283241 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.572706 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerStarted","Data":"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c"} Nov 22 09:16:50 crc kubenswrapper[4693]: I1122 09:16:50.642505 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Nov 22 09:16:50 crc kubenswrapper[4693]: W1122 09:16:50.646345 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7728d59_f897_43e3_a7d6_7d1704f41739.slice/crio-c69e329ace93b8c75435f1296263238f91ad26fb90f9b16ca0e3c7929dd36a55 WatchSource:0}: Error finding container c69e329ace93b8c75435f1296263238f91ad26fb90f9b16ca0e3c7929dd36a55: Status 404 returned error can't find the container with id c69e329ace93b8c75435f1296263238f91ad26fb90f9b16ca0e3c7929dd36a55 Nov 22 09:16:51 crc kubenswrapper[4693]: I1122 09:16:51.580032 4693 generic.go:334] "Generic (PLEG): container finished" podID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerID="fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c" exitCode=0 Nov 22 09:16:51 crc kubenswrapper[4693]: I1122 09:16:51.580090 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerDied","Data":"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c"} Nov 22 09:16:51 crc kubenswrapper[4693]: I1122 09:16:51.581985 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7728d59-f897-43e3-a7d6-7d1704f41739","Type":"ContainerStarted","Data":"c69e329ace93b8c75435f1296263238f91ad26fb90f9b16ca0e3c7929dd36a55"} Nov 22 09:16:51 crc kubenswrapper[4693]: I1122 09:16:51.930670 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openstack/memcached-0" Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.589791 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerStarted","Data":"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604"} Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.591243 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7728d59-f897-43e3-a7d6-7d1704f41739","Type":"ContainerStarted","Data":"c81a0a157993d3b1d74334c88025b4d97fc701d87369a28fdcaac4ff79eabba4"} Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.591280 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"f7728d59-f897-43e3-a7d6-7d1704f41739","Type":"ContainerStarted","Data":"7db25363ccd8c4bc2b8099fbdfd2d3ac45215a7d621181dcacd69170f3c831bd"} Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.591378 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.604616 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-48z8c" podStartSLOduration=2.092926655 podStartE2EDuration="4.604606692s" podCreationTimestamp="2025-11-22 09:16:48 +0000 UTC" firstStartedPulling="2025-11-22 09:16:49.568154695 +0000 UTC m=+805.710656986" lastFinishedPulling="2025-11-22 09:16:52.079834731 +0000 UTC m=+808.222337023" observedRunningTime="2025-11-22 09:16:52.601107766 +0000 UTC m=+808.743610056" watchObservedRunningTime="2025-11-22 09:16:52.604606692 +0000 UTC m=+808.747108983" Nov 22 09:16:52 crc kubenswrapper[4693]: I1122 09:16:52.615342 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.179751007 podStartE2EDuration="3.615334145s" podCreationTimestamp="2025-11-22 09:16:49 +0000 UTC" firstStartedPulling="2025-11-22 09:16:50.647948673 +0000 UTC m=+806.790450964" lastFinishedPulling="2025-11-22 09:16:52.083531811 +0000 UTC m=+808.226034102" observedRunningTime="2025-11-22 09:16:52.613110555 +0000 UTC m=+808.755612847" watchObservedRunningTime="2025-11-22 09:16:52.615334145 +0000 UTC m=+808.757836436" Nov 22 09:16:54 crc kubenswrapper[4693]: I1122 09:16:54.243946 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:54 crc kubenswrapper[4693]: I1122 09:16:54.506617 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:16:54 crc kubenswrapper[4693]: I1122 09:16:54.540155 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:54 crc kubenswrapper[4693]: I1122 09:16:54.600938 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="dnsmasq-dns" containerID="cri-o://5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3" gracePeriod=10 Nov 22 09:16:54 crc kubenswrapper[4693]: I1122 09:16:54.961384 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.000180 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.001494 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.073496 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.135070 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc\") pod \"83d4df43-be89-47f3-9a70-c0e5c7751e60\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.135523 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config\") pod \"83d4df43-be89-47f3-9a70-c0e5c7751e60\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.135589 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8g96x\" (UniqueName: \"kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x\") pod \"83d4df43-be89-47f3-9a70-c0e5c7751e60\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.135648 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb\") pod \"83d4df43-be89-47f3-9a70-c0e5c7751e60\" (UID: \"83d4df43-be89-47f3-9a70-c0e5c7751e60\") " Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.140011 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x" (OuterVolumeSpecName: "kube-api-access-8g96x") pod "83d4df43-be89-47f3-9a70-c0e5c7751e60" (UID: "83d4df43-be89-47f3-9a70-c0e5c7751e60"). InnerVolumeSpecName "kube-api-access-8g96x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.163428 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "83d4df43-be89-47f3-9a70-c0e5c7751e60" (UID: "83d4df43-be89-47f3-9a70-c0e5c7751e60"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.163965 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "83d4df43-be89-47f3-9a70-c0e5c7751e60" (UID: "83d4df43-be89-47f3-9a70-c0e5c7751e60"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.165512 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config" (OuterVolumeSpecName: "config") pod "83d4df43-be89-47f3-9a70-c0e5c7751e60" (UID: "83d4df43-be89-47f3-9a70-c0e5c7751e60"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.238071 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.238098 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.238107 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8g96x\" (UniqueName: \"kubernetes.io/projected/83d4df43-be89-47f3-9a70-c0e5c7751e60-kube-api-access-8g96x\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.238117 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/83d4df43-be89-47f3-9a70-c0e5c7751e60-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.607408 4693 generic.go:334] "Generic (PLEG): container finished" podID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerID="5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3" exitCode=0 Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.607492 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.607531 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" event={"ID":"83d4df43-be89-47f3-9a70-c0e5c7751e60","Type":"ContainerDied","Data":"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3"} Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.607561 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5848494dd9-4lrk6" event={"ID":"83d4df43-be89-47f3-9a70-c0e5c7751e60","Type":"ContainerDied","Data":"9b78416874ed7406dd10b4e0155e724a3aad540ab26541dabd83cbd0fc3915f5"} Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.607574 4693 scope.go:117] "RemoveContainer" containerID="5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.625173 4693 scope.go:117] "RemoveContainer" containerID="3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.629199 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.632571 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5848494dd9-4lrk6"] Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.653075 4693 scope.go:117] "RemoveContainer" containerID="5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3" Nov 22 09:16:55 crc kubenswrapper[4693]: E1122 09:16:55.653430 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3\": container with ID starting with 5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3 not found: ID does not exist" containerID="5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.653462 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3"} err="failed to get container status \"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3\": rpc error: code = NotFound desc = could not find container \"5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3\": container with ID starting with 5bc714409f232502c80400dc277766eacd82fa22bd7e68df03476b56ced4d2a3 not found: ID does not exist" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.653481 4693 scope.go:117] "RemoveContainer" containerID="3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7" Nov 22 09:16:55 crc kubenswrapper[4693]: E1122 09:16:55.653764 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7\": container with ID starting with 3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7 not found: ID does not exist" containerID="3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.653796 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7"} err="failed to get container status 
\"3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7\": rpc error: code = NotFound desc = could not find container \"3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7\": container with ID starting with 3ceefd60da62a6f5751bfc30785cca64ba8ec810ac462a0b6c42233c8b8e01e7 not found: ID does not exist" Nov 22 09:16:55 crc kubenswrapper[4693]: I1122 09:16:55.663702 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.152919 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" path="/var/lib/kubelet/pods/83d4df43-be89-47f3-9a70-c0e5c7751e60/volumes" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.461683 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.461719 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.512003 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.584023 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-4148-account-create-gzh7s"] Nov 22 09:16:56 crc kubenswrapper[4693]: E1122 09:16:56.584298 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="dnsmasq-dns" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.584309 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="dnsmasq-dns" Nov 22 09:16:56 crc kubenswrapper[4693]: E1122 09:16:56.584318 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="init" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.584323 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="init" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.584476 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="83d4df43-be89-47f3-9a70-c0e5c7751e60" containerName="dnsmasq-dns" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.584930 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.586266 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.589091 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-4148-account-create-gzh7s"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.625331 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-8w4v2"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.626414 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.629658 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8w4v2"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.669250 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.758368 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6956r\" (UniqueName: \"kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.758466 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k85tf\" (UniqueName: \"kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.758553 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.758637 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.812107 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-pmkp5"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.812964 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.817372 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pmkp5"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.860500 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.860569 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.860619 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6956r\" (UniqueName: \"kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.860655 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k85tf\" (UniqueName: \"kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.861357 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.861532 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.875241 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k85tf\" (UniqueName: \"kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf\") pod \"keystone-db-create-8w4v2\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.875629 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6956r\" (UniqueName: \"kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r\") pod \"keystone-4148-account-create-gzh7s\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.882417 4693 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/placement-f71f-account-create-hv44c"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.883237 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.885023 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.893026 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f71f-account-create-hv44c"] Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.896825 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.937055 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.962668 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.962866 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfbwh\" (UniqueName: \"kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.962897 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5tpd\" (UniqueName: \"kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:56 crc kubenswrapper[4693]: I1122 09:16:56.962919 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.064274 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.064334 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfbwh\" (UniqueName: \"kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.064361 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-k5tpd\" (UniqueName: \"kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.064382 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.065059 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.065254 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.078159 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfbwh\" (UniqueName: \"kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh\") pod \"placement-f71f-account-create-hv44c\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.078948 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5tpd\" (UniqueName: \"kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd\") pod \"placement-db-create-pmkp5\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.125860 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pmkp5" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.248767 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.257491 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-4148-account-create-gzh7s"] Nov 22 09:16:57 crc kubenswrapper[4693]: W1122 09:16:57.263382 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd119d80e_d66d_4b1a_8060_063fdffe2dd3.slice/crio-4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f WatchSource:0}: Error finding container 4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f: Status 404 returned error can't find the container with id 4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.355151 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-8w4v2"] Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.500654 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-pmkp5"] Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.622289 4693 generic.go:334] "Generic (PLEG): container finished" podID="d119d80e-d66d-4b1a-8060-063fdffe2dd3" containerID="b50c384d824b3605d696d09b4899d4315c63d85f21804c7a743d1317b05c7afd" exitCode=0 Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.622356 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4148-account-create-gzh7s" event={"ID":"d119d80e-d66d-4b1a-8060-063fdffe2dd3","Type":"ContainerDied","Data":"b50c384d824b3605d696d09b4899d4315c63d85f21804c7a743d1317b05c7afd"} Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.622382 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4148-account-create-gzh7s" event={"ID":"d119d80e-d66d-4b1a-8060-063fdffe2dd3","Type":"ContainerStarted","Data":"4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f"} Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.623192 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pmkp5" event={"ID":"212368f3-c21d-4470-a07f-b58004842270","Type":"ContainerStarted","Data":"541a980cf5f7ccdc635c5dcd037b640da13590e289caace9a00da97932e0f9da"} Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.624376 4693 generic.go:334] "Generic (PLEG): container finished" podID="f2e9de7a-dfb1-41ca-a626-36067510f4b4" containerID="d4096b5605a401f44590daafd2f56035c2a447456a8950d00d67a74236b1b4c6" exitCode=0 Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.624788 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8w4v2" event={"ID":"f2e9de7a-dfb1-41ca-a626-36067510f4b4","Type":"ContainerDied","Data":"d4096b5605a401f44590daafd2f56035c2a447456a8950d00d67a74236b1b4c6"} Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.624811 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8w4v2" event={"ID":"f2e9de7a-dfb1-41ca-a626-36067510f4b4","Type":"ContainerStarted","Data":"dd2385cc0b1cdd10ea06057b3588bad086d325ea6819cf4260f8c27572fc1744"} Nov 22 09:16:57 crc kubenswrapper[4693]: I1122 09:16:57.635504 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-f71f-account-create-hv44c"] Nov 22 09:16:57 crc kubenswrapper[4693]: W1122 09:16:57.674245 4693 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod797b0e4c_377f_4fbb_bf27_acf705907346.slice/crio-2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f WatchSource:0}: Error finding container 2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f: Status 404 returned error can't find the container with id 2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.472094 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.477634 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.508472 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.509673 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.517796 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.543811 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.588126 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.588252 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cj8x6\" (UniqueName: \"kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.588309 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.588373 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.588414 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 
09:16:58.630933 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f71f-account-create-hv44c" event={"ID":"797b0e4c-377f-4fbb-bf27-acf705907346","Type":"ContainerStarted","Data":"2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f"} Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.667375 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.689808 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.689890 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cj8x6\" (UniqueName: \"kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.689929 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.689952 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.689974 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.690687 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.690689 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.690950 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.691614 
4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.709282 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cj8x6\" (UniqueName: \"kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6\") pod \"dnsmasq-dns-cf8bcbfcf-cnw8s\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.770518 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.836458 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.922584 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:58 crc kubenswrapper[4693]: I1122 09:16:58.925490 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.094479 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k85tf\" (UniqueName: \"kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf\") pod \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.094534 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6956r\" (UniqueName: \"kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r\") pod \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.094574 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts\") pod \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\" (UID: \"d119d80e-d66d-4b1a-8060-063fdffe2dd3\") " Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.094674 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts\") pod \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\" (UID: \"f2e9de7a-dfb1-41ca-a626-36067510f4b4\") " Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.095150 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d119d80e-d66d-4b1a-8060-063fdffe2dd3" (UID: "d119d80e-d66d-4b1a-8060-063fdffe2dd3"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.095252 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f2e9de7a-dfb1-41ca-a626-36067510f4b4" (UID: "f2e9de7a-dfb1-41ca-a626-36067510f4b4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.098829 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r" (OuterVolumeSpecName: "kube-api-access-6956r") pod "d119d80e-d66d-4b1a-8060-063fdffe2dd3" (UID: "d119d80e-d66d-4b1a-8060-063fdffe2dd3"). InnerVolumeSpecName "kube-api-access-6956r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.098926 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf" (OuterVolumeSpecName: "kube-api-access-k85tf") pod "f2e9de7a-dfb1-41ca-a626-36067510f4b4" (UID: "f2e9de7a-dfb1-41ca-a626-36067510f4b4"). InnerVolumeSpecName "kube-api-access-k85tf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.196729 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k85tf\" (UniqueName: \"kubernetes.io/projected/f2e9de7a-dfb1-41ca-a626-36067510f4b4-kube-api-access-k85tf\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.197017 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6956r\" (UniqueName: \"kubernetes.io/projected/d119d80e-d66d-4b1a-8060-063fdffe2dd3-kube-api-access-6956r\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.197028 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d119d80e-d66d-4b1a-8060-063fdffe2dd3-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.197036 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f2e9de7a-dfb1-41ca-a626-36067510f4b4-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.218433 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.611384 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"] Nov 22 09:16:59 crc kubenswrapper[4693]: E1122 09:16:59.611667 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2e9de7a-dfb1-41ca-a626-36067510f4b4" containerName="mariadb-database-create" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.611682 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2e9de7a-dfb1-41ca-a626-36067510f4b4" containerName="mariadb-database-create" Nov 22 09:16:59 crc kubenswrapper[4693]: E1122 09:16:59.611713 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d119d80e-d66d-4b1a-8060-063fdffe2dd3" containerName="mariadb-account-create" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.611718 4693 
state_mem.go:107] "Deleted CPUSet assignment" podUID="d119d80e-d66d-4b1a-8060-063fdffe2dd3" containerName="mariadb-account-create" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.611889 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2e9de7a-dfb1-41ca-a626-36067510f4b4" containerName="mariadb-database-create" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.611908 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d119d80e-d66d-4b1a-8060-063fdffe2dd3" containerName="mariadb-account-create" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.616690 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.618326 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-d46pq" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.618345 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.618326 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.618489 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.623741 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.637775 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-8w4v2" event={"ID":"f2e9de7a-dfb1-41ca-a626-36067510f4b4","Type":"ContainerDied","Data":"dd2385cc0b1cdd10ea06057b3588bad086d325ea6819cf4260f8c27572fc1744"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.637808 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="dd2385cc0b1cdd10ea06057b3588bad086d325ea6819cf4260f8c27572fc1744" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.637782 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-8w4v2" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.638811 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-4148-account-create-gzh7s" event={"ID":"d119d80e-d66d-4b1a-8060-063fdffe2dd3","Type":"ContainerDied","Data":"4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.638870 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a6784776deaeabd480c3a6fbcc92190d4e6f72e5e68b950c0d508e078bce38f" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.638910 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-4148-account-create-gzh7s" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.640314 4693 generic.go:334] "Generic (PLEG): container finished" podID="e2244b23-21f6-4be8-a18d-9204f757709a" containerID="6f8bc043c6cc8e7e66316de4c0e84dc34dd644896314cdeefb583fecaabff998" exitCode=0 Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.640343 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" event={"ID":"e2244b23-21f6-4be8-a18d-9204f757709a","Type":"ContainerDied","Data":"6f8bc043c6cc8e7e66316de4c0e84dc34dd644896314cdeefb583fecaabff998"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.640379 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" event={"ID":"e2244b23-21f6-4be8-a18d-9204f757709a","Type":"ContainerStarted","Data":"d9d2cb59b5fa514bb49767ef7d39534b78715fe1b712338352d91c82d33a8f69"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.641408 4693 generic.go:334] "Generic (PLEG): container finished" podID="797b0e4c-377f-4fbb-bf27-acf705907346" containerID="54099bcd3b274e8d9621fb589a28e09e3fb000ef65b4c9218e97b02e6729895b" exitCode=0 Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.641443 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f71f-account-create-hv44c" event={"ID":"797b0e4c-377f-4fbb-bf27-acf705907346","Type":"ContainerDied","Data":"54099bcd3b274e8d9621fb589a28e09e3fb000ef65b4c9218e97b02e6729895b"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.642418 4693 generic.go:334] "Generic (PLEG): container finished" podID="212368f3-c21d-4470-a07f-b58004842270" containerID="c8544bb38558e53ec3128880bcfdee6e543c086bada3a7146db8b2c0434c2462" exitCode=0 Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.642441 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pmkp5" event={"ID":"212368f3-c21d-4470-a07f-b58004842270","Type":"ContainerDied","Data":"c8544bb38558e53ec3128880bcfdee6e543c086bada3a7146db8b2c0434c2462"} Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.703764 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.704007 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-cache\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.704051 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-lock\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.704183 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9dxm\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-kube-api-access-x9dxm\") pod \"swift-storage-0\" (UID: 
\"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.704223 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.806283 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.806341 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-cache\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.806387 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-lock\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: E1122 09:16:59.806502 4693 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 09:16:59 crc kubenswrapper[4693]: E1122 09:16:59.806520 4693 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 09:16:59 crc kubenswrapper[4693]: E1122 09:16:59.806557 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift podName:50bf6e78-87b2-416e-89b5-fa163645a184 nodeName:}" failed. No retries permitted until 2025-11-22 09:17:00.306544073 +0000 UTC m=+816.449046364 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift") pod "swift-storage-0" (UID: "50bf6e78-87b2-416e-89b5-fa163645a184") : configmap "swift-ring-files" not found Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.806578 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9dxm\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-kube-api-access-x9dxm\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.806622 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.807081 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.807982 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-lock\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.808272 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/50bf6e78-87b2-416e-89b5-fa163645a184-cache\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.819703 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9dxm\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-kube-api-access-x9dxm\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:16:59 crc kubenswrapper[4693]: I1122 09:16:59.822046 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.115939 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-p9zqf"] Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.116833 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.118110 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.118122 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.118530 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.158809 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-p9zqf"] Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212529 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212582 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xk9s\" (UniqueName: \"kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212634 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212654 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212676 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212738 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.212812 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 
09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314503 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314545 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314580 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314625 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314710 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.314727 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xk9s\" (UniqueName: \"kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: E1122 09:17:00.314934 4693 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 09:17:00 crc kubenswrapper[4693]: E1122 09:17:00.314967 4693 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 09:17:00 crc kubenswrapper[4693]: E1122 09:17:00.315016 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift podName:50bf6e78-87b2-416e-89b5-fa163645a184 nodeName:}" failed. No retries permitted until 2025-11-22 09:17:01.315000887 +0000 UTC m=+817.457503178 (durationBeforeRetry 1s). 
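Each retry fails for the same reason: the projected etc-swift volume for swift-storage-0 needs the swift-ring-files ConfigMap, which only appears once the swift-ring-rebalance job publishes it. A hedged client-go sketch of waiting for that ConfigMap (the namespace and name come from the log; the in-cluster config, 5s poll interval, and 10m timeout are assumptions):

package main

import (
	"context"
	"fmt"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumption: runs inside the cluster
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Poll until the ConfigMap backing the projected volume exists,
	// mirroring the kubelet's own retry-until-present behavior above.
	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, 10*time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := cs.CoreV1().ConfigMaps("openstack").Get(ctx, "swift-ring-files", metav1.GetOptions{})
			if apierrors.IsNotFound(err) {
				return false, nil // not there yet; keep polling
			}
			return err == nil, err
		})
	if err != nil {
		panic(err)
	}
	fmt.Println("swift-ring-files present; etc-swift for swift-storage-0 can now mount")
}
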
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift") pod "swift-storage-0" (UID: "50bf6e78-87b2-416e-89b5-fa163645a184") : configmap "swift-ring-files" not found Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.315361 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.315419 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.315434 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.315502 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.315604 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.317543 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.318123 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.318540 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf\") pod \"swift-ring-rebalance-p9zqf\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.328178 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xk9s\" (UniqueName: \"kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s\") pod \"swift-ring-rebalance-p9zqf\" (UID: 
\"23436668-ec00-4623-b854-aec77bf25158\") " pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.430221 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.650577 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" event={"ID":"e2244b23-21f6-4be8-a18d-9204f757709a","Type":"ContainerStarted","Data":"78a60223ccc9e4edc2f9cacb551457f116229029fb8495ce70cb1607e236e96e"} Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.650803 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-48z8c" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="registry-server" containerID="cri-o://fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604" gracePeriod=2 Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.668966 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" podStartSLOduration=2.668952854 podStartE2EDuration="2.668952854s" podCreationTimestamp="2025-11-22 09:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:00.665777106 +0000 UTC m=+816.808279397" watchObservedRunningTime="2025-11-22 09:17:00.668952854 +0000 UTC m=+816.811455145" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.773017 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-p9zqf"] Nov 22 09:17:00 crc kubenswrapper[4693]: W1122 09:17:00.779150 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod23436668_ec00_4623_b854_aec77bf25158.slice/crio-840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b WatchSource:0}: Error finding container 840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b: Status 404 returned error can't find the container with id 840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.969171 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-pmkp5" Nov 22 09:17:00 crc kubenswrapper[4693]: I1122 09:17:00.973079 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.126625 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfbwh\" (UniqueName: \"kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh\") pod \"797b0e4c-377f-4fbb-bf27-acf705907346\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.126686 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts\") pod \"797b0e4c-377f-4fbb-bf27-acf705907346\" (UID: \"797b0e4c-377f-4fbb-bf27-acf705907346\") " Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.126767 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k5tpd\" (UniqueName: \"kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd\") pod \"212368f3-c21d-4470-a07f-b58004842270\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.126785 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts\") pod \"212368f3-c21d-4470-a07f-b58004842270\" (UID: \"212368f3-c21d-4470-a07f-b58004842270\") " Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.127385 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "212368f3-c21d-4470-a07f-b58004842270" (UID: "212368f3-c21d-4470-a07f-b58004842270"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.127759 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "797b0e4c-377f-4fbb-bf27-acf705907346" (UID: "797b0e4c-377f-4fbb-bf27-acf705907346"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.131088 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh" (OuterVolumeSpecName: "kube-api-access-tfbwh") pod "797b0e4c-377f-4fbb-bf27-acf705907346" (UID: "797b0e4c-377f-4fbb-bf27-acf705907346"). InnerVolumeSpecName "kube-api-access-tfbwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.131121 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd" (OuterVolumeSpecName: "kube-api-access-k5tpd") pod "212368f3-c21d-4470-a07f-b58004842270" (UID: "212368f3-c21d-4470-a07f-b58004842270"). InnerVolumeSpecName "kube-api-access-k5tpd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.228362 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/797b0e4c-377f-4fbb-bf27-acf705907346-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.228385 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k5tpd\" (UniqueName: \"kubernetes.io/projected/212368f3-c21d-4470-a07f-b58004842270-kube-api-access-k5tpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.228395 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/212368f3-c21d-4470-a07f-b58004842270-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.228403 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfbwh\" (UniqueName: \"kubernetes.io/projected/797b0e4c-377f-4fbb-bf27-acf705907346-kube-api-access-tfbwh\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.329827 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:01 crc kubenswrapper[4693]: E1122 09:17:01.329955 4693 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 09:17:01 crc kubenswrapper[4693]: E1122 09:17:01.329977 4693 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 09:17:01 crc kubenswrapper[4693]: E1122 09:17:01.330019 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift podName:50bf6e78-87b2-416e-89b5-fa163645a184 nodeName:}" failed. No retries permitted until 2025-11-22 09:17:03.33000704 +0000 UTC m=+819.472509332 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift") pod "swift-storage-0" (UID: "50bf6e78-87b2-416e-89b5-fa163645a184") : configmap "swift-ring-files" not found Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.657716 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p9zqf" event={"ID":"23436668-ec00-4623-b854-aec77bf25158","Type":"ContainerStarted","Data":"840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b"} Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.659226 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-pmkp5" event={"ID":"212368f3-c21d-4470-a07f-b58004842270","Type":"ContainerDied","Data":"541a980cf5f7ccdc635c5dcd037b640da13590e289caace9a00da97932e0f9da"} Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.659250 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="541a980cf5f7ccdc635c5dcd037b640da13590e289caace9a00da97932e0f9da" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.659291 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-create-pmkp5" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.661591 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-f71f-account-create-hv44c" event={"ID":"797b0e4c-377f-4fbb-bf27-acf705907346","Type":"ContainerDied","Data":"2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f"} Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.661619 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2947fbbe6b731903a43b3b8769d1b437841dc3228b210474b457b409b76f911f" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.661635 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-f71f-account-create-hv44c" Nov 22 09:17:01 crc kubenswrapper[4693]: I1122 09:17:01.661728 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:17:01 crc kubenswrapper[4693]: E1122 09:17:01.717763 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bdb6a2f_c0d5_4575_a0e6_b958edd39144.slice/crio-conmon-fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604.scope\": RecentStats: unable to find data in memory cache]" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.031339 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-xbqdj"] Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.031629 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="212368f3-c21d-4470-a07f-b58004842270" containerName="mariadb-database-create" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.031641 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="212368f3-c21d-4470-a07f-b58004842270" containerName="mariadb-database-create" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.031657 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797b0e4c-377f-4fbb-bf27-acf705907346" containerName="mariadb-account-create" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.031663 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="797b0e4c-377f-4fbb-bf27-acf705907346" containerName="mariadb-account-create" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.031812 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="212368f3-c21d-4470-a07f-b58004842270" containerName="mariadb-database-create" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.031822 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="797b0e4c-377f-4fbb-bf27-acf705907346" containerName="mariadb-account-create" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.032309 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.037262 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xbqdj"] Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.116824 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.142328 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-02aa-account-create-mszg4"] Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.142662 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="extract-utilities" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.142679 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="extract-utilities" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.142702 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="extract-content" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.142708 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="extract-content" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.142716 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="registry-server" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.142721 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="registry-server" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.142897 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerName="registry-server" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.143385 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.144744 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.146018 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.146127 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vczb6\" (UniqueName: \"kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.158956 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-02aa-account-create-mszg4"] Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.249526 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities\") pod \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.249572 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content\") pod \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.249816 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dsmjv\" (UniqueName: \"kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv\") pod \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\" (UID: \"9bdb6a2f-c0d5-4575-a0e6-b958edd39144\") " Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.250153 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vczb6\" (UniqueName: \"kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.250208 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.250274 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.250355 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rflwm\" (UniqueName: \"kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.250541 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities" (OuterVolumeSpecName: "utilities") pod "9bdb6a2f-c0d5-4575-a0e6-b958edd39144" (UID: "9bdb6a2f-c0d5-4575-a0e6-b958edd39144"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.251143 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.254078 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv" (OuterVolumeSpecName: "kube-api-access-dsmjv") pod "9bdb6a2f-c0d5-4575-a0e6-b958edd39144" (UID: "9bdb6a2f-c0d5-4575-a0e6-b958edd39144"). InnerVolumeSpecName "kube-api-access-dsmjv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.264591 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vczb6\" (UniqueName: \"kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6\") pod \"glance-db-create-xbqdj\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.319812 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9bdb6a2f-c0d5-4575-a0e6-b958edd39144" (UID: "9bdb6a2f-c0d5-4575-a0e6-b958edd39144"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.346643 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.351379 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rflwm\" (UniqueName: \"kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.351457 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.351527 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dsmjv\" (UniqueName: \"kubernetes.io/projected/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-kube-api-access-dsmjv\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.351538 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.351547 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9bdb6a2f-c0d5-4575-a0e6-b958edd39144-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.352193 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.364873 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rflwm\" (UniqueName: \"kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm\") pod \"glance-02aa-account-create-mszg4\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.457629 4693 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.669239 4693 generic.go:334] "Generic (PLEG): container finished" podID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" containerID="fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604" exitCode=0 Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.669963 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerDied","Data":"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604"} Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.669985 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-48z8c" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.670005 4693 scope.go:117] "RemoveContainer" containerID="fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.669995 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-48z8c" event={"ID":"9bdb6a2f-c0d5-4575-a0e6-b958edd39144","Type":"ContainerDied","Data":"259d2fdcc6cccb9e26239b4bf54e90bc6702dc410bf835578f9d03938ed3a56b"} Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.688837 4693 scope.go:117] "RemoveContainer" containerID="fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.696792 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.700708 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-48z8c"] Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.714148 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-xbqdj"] Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.715738 4693 scope.go:117] "RemoveContainer" containerID="63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5" Nov 22 09:17:02 crc kubenswrapper[4693]: W1122 09:17:02.720393 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8989032_0fdb_4a30_969e_4654c3a97e46.slice/crio-2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9 WatchSource:0}: Error finding container 2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9: Status 404 returned error can't find the container with id 2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9 Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.731348 4693 scope.go:117] "RemoveContainer" containerID="fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.731656 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604\": container with ID starting with fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604 not found: ID does not exist" containerID="fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.731688 4693 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604"} err="failed to get container status \"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604\": rpc error: code = NotFound desc = could not find container \"fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604\": container with ID starting with fd03fdea0d6d4e8e65eab9f583960dad5b0394092c2b15075feea9c61c536604 not found: ID does not exist" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.731705 4693 scope.go:117] "RemoveContainer" containerID="fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.732020 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c\": container with ID starting with fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c not found: ID does not exist" containerID="fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.732891 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c"} err="failed to get container status \"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c\": rpc error: code = NotFound desc = could not find container \"fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c\": container with ID starting with fea9a493919098a1e26dd2e0c40f2f48acb214cc8c46e582b70a3bd2c397a38c not found: ID does not exist" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.732925 4693 scope.go:117] "RemoveContainer" containerID="63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5" Nov 22 09:17:02 crc kubenswrapper[4693]: E1122 09:17:02.734255 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5\": container with ID starting with 63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5 not found: ID does not exist" containerID="63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5" Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.734300 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5"} err="failed to get container status \"63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5\": rpc error: code = NotFound desc = could not find container \"63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5\": container with ID starting with 63d8942f570990c4804d73db7a59692c575153811c36f1300409c72cc5419dd5 not found: ID does not exist" Nov 22 09:17:02 crc kubenswrapper[4693]: W1122 09:17:02.822795 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbff1c168_3d8b_40e0_93c6_bb2099287437.slice/crio-75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b WatchSource:0}: Error finding container 75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b: Status 404 returned error can't find the container with id 75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b Nov 22 09:17:02 crc kubenswrapper[4693]: I1122 09:17:02.827250 4693 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-02aa-account-create-mszg4"] Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.368176 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:03 crc kubenswrapper[4693]: E1122 09:17:03.368348 4693 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 09:17:03 crc kubenswrapper[4693]: E1122 09:17:03.368606 4693 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 09:17:03 crc kubenswrapper[4693]: E1122 09:17:03.368661 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift podName:50bf6e78-87b2-416e-89b5-fa163645a184 nodeName:}" failed. No retries permitted until 2025-11-22 09:17:07.368644497 +0000 UTC m=+823.511146788 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift") pod "swift-storage-0" (UID: "50bf6e78-87b2-416e-89b5-fa163645a184") : configmap "swift-ring-files" not found Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.676751 4693 generic.go:334] "Generic (PLEG): container finished" podID="bff1c168-3d8b-40e0-93c6-bb2099287437" containerID="dcc21f302db42beb74cc74bed7105f34ae65d435733ce906850f298da6cdd021" exitCode=0 Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.676796 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02aa-account-create-mszg4" event={"ID":"bff1c168-3d8b-40e0-93c6-bb2099287437","Type":"ContainerDied","Data":"dcc21f302db42beb74cc74bed7105f34ae65d435733ce906850f298da6cdd021"} Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.676832 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02aa-account-create-mszg4" event={"ID":"bff1c168-3d8b-40e0-93c6-bb2099287437","Type":"ContainerStarted","Data":"75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b"} Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.679284 4693 generic.go:334] "Generic (PLEG): container finished" podID="e8989032-0fdb-4a30-969e-4654c3a97e46" containerID="caa061c5aebce046bbf4bb08476d710369531f72a30c521f730750c190ae77b0" exitCode=0 Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.679314 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xbqdj" event={"ID":"e8989032-0fdb-4a30-969e-4654c3a97e46","Type":"ContainerDied","Data":"caa061c5aebce046bbf4bb08476d710369531f72a30c521f730750c190ae77b0"} Nov 22 09:17:03 crc kubenswrapper[4693]: I1122 09:17:03.679329 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xbqdj" event={"ID":"e8989032-0fdb-4a30-969e-4654c3a97e46","Type":"ContainerStarted","Data":"2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9"} Nov 22 09:17:04 crc kubenswrapper[4693]: I1122 09:17:04.154166 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bdb6a2f-c0d5-4575-a0e6-b958edd39144" path="/var/lib/kubelet/pods/9bdb6a2f-c0d5-4575-a0e6-b958edd39144/volumes" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.293310 4693 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.296328 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.342226 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.401424 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vczb6\" (UniqueName: \"kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6\") pod \"e8989032-0fdb-4a30-969e-4654c3a97e46\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.401538 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts\") pod \"bff1c168-3d8b-40e0-93c6-bb2099287437\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.401602 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts\") pod \"e8989032-0fdb-4a30-969e-4654c3a97e46\" (UID: \"e8989032-0fdb-4a30-969e-4654c3a97e46\") " Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.401635 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rflwm\" (UniqueName: \"kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm\") pod \"bff1c168-3d8b-40e0-93c6-bb2099287437\" (UID: \"bff1c168-3d8b-40e0-93c6-bb2099287437\") " Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.402306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "e8989032-0fdb-4a30-969e-4654c3a97e46" (UID: "e8989032-0fdb-4a30-969e-4654c3a97e46"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.402306 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "bff1c168-3d8b-40e0-93c6-bb2099287437" (UID: "bff1c168-3d8b-40e0-93c6-bb2099287437"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.405376 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6" (OuterVolumeSpecName: "kube-api-access-vczb6") pod "e8989032-0fdb-4a30-969e-4654c3a97e46" (UID: "e8989032-0fdb-4a30-969e-4654c3a97e46"). InnerVolumeSpecName "kube-api-access-vczb6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.405817 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm" (OuterVolumeSpecName: "kube-api-access-rflwm") pod "bff1c168-3d8b-40e0-93c6-bb2099287437" (UID: "bff1c168-3d8b-40e0-93c6-bb2099287437"). InnerVolumeSpecName "kube-api-access-rflwm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.504342 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vczb6\" (UniqueName: \"kubernetes.io/projected/e8989032-0fdb-4a30-969e-4654c3a97e46-kube-api-access-vczb6\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.504375 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/bff1c168-3d8b-40e0-93c6-bb2099287437-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.504385 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/e8989032-0fdb-4a30-969e-4654c3a97e46-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.504396 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rflwm\" (UniqueName: \"kubernetes.io/projected/bff1c168-3d8b-40e0-93c6-bb2099287437-kube-api-access-rflwm\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.700551 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p9zqf" event={"ID":"23436668-ec00-4623-b854-aec77bf25158","Type":"ContainerStarted","Data":"1fed825f1f8c875b5a9d597ab4f50500eaf1da4a5352d6473eabae20878618c5"} Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.702526 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-xbqdj" event={"ID":"e8989032-0fdb-4a30-969e-4654c3a97e46","Type":"ContainerDied","Data":"2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9"} Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.702554 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-xbqdj" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.702558 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ca19c8092ac9e8ae87daa23142913d3ddd1eb34ed2c92fc1baf9dde9355bbc9" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.711375 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-02aa-account-create-mszg4" event={"ID":"bff1c168-3d8b-40e0-93c6-bb2099287437","Type":"ContainerDied","Data":"75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b"} Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.711400 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="75b97ae96c86ab7b67782f37664082080f48c632304b9ce0641a80b5cf47367b" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.711413 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-02aa-account-create-mszg4" Nov 22 09:17:05 crc kubenswrapper[4693]: I1122 09:17:05.720479 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-p9zqf" podStartSLOduration=1.299545676 podStartE2EDuration="5.720466857s" podCreationTimestamp="2025-11-22 09:17:00 +0000 UTC" firstStartedPulling="2025-11-22 09:17:00.781232635 +0000 UTC m=+816.923734927" lastFinishedPulling="2025-11-22 09:17:05.202153818 +0000 UTC m=+821.344656108" observedRunningTime="2025-11-22 09:17:05.714805336 +0000 UTC m=+821.857307627" watchObservedRunningTime="2025-11-22 09:17:05.720466857 +0000 UTC m=+821.862969148" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.328541 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-5sz6k"] Nov 22 09:17:07 crc kubenswrapper[4693]: E1122 09:17:07.329473 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8989032-0fdb-4a30-969e-4654c3a97e46" containerName="mariadb-database-create" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.329498 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8989032-0fdb-4a30-969e-4654c3a97e46" containerName="mariadb-database-create" Nov 22 09:17:07 crc kubenswrapper[4693]: E1122 09:17:07.329532 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bff1c168-3d8b-40e0-93c6-bb2099287437" containerName="mariadb-account-create" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.329540 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="bff1c168-3d8b-40e0-93c6-bb2099287437" containerName="mariadb-account-create" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.329749 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="bff1c168-3d8b-40e0-93c6-bb2099287437" containerName="mariadb-account-create" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.329770 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8989032-0fdb-4a30-969e-4654c3a97e46" containerName="mariadb-database-create" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.330627 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.333695 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cwf55" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.333870 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.341477 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5sz6k"] Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.433404 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.433487 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jfnt6\" (UniqueName: \"kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.433597 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.433758 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.433796 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: E1122 09:17:07.433956 4693 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Nov 22 09:17:07 crc kubenswrapper[4693]: E1122 09:17:07.433976 4693 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Nov 22 09:17:07 crc kubenswrapper[4693]: E1122 09:17:07.434018 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift podName:50bf6e78-87b2-416e-89b5-fa163645a184 nodeName:}" failed. No retries permitted until 2025-11-22 09:17:15.434005109 +0000 UTC m=+831.576507400 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift") pod "swift-storage-0" (UID: "50bf6e78-87b2-416e-89b5-fa163645a184") : configmap "swift-ring-files" not found Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.535482 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.535542 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.535619 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jfnt6\" (UniqueName: \"kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.535651 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.541146 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.541270 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.542593 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.549283 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jfnt6\" (UniqueName: \"kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6\") pod \"glance-db-sync-5sz6k\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:07 crc kubenswrapper[4693]: I1122 09:17:07.648888 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:08 crc kubenswrapper[4693]: I1122 09:17:08.117473 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-5sz6k"] Nov 22 09:17:08 crc kubenswrapper[4693]: W1122 09:17:08.120082 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod78b948e6_0096_4708_9bd3_74a2a8d7cc37.slice/crio-4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a WatchSource:0}: Error finding container 4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a: Status 404 returned error can't find the container with id 4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a Nov 22 09:17:08 crc kubenswrapper[4693]: I1122 09:17:08.745398 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5sz6k" event={"ID":"78b948e6-0096-4708-9bd3-74a2a8d7cc37","Type":"ContainerStarted","Data":"4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a"} Nov 22 09:17:08 crc kubenswrapper[4693]: I1122 09:17:08.837914 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:17:08 crc kubenswrapper[4693]: I1122 09:17:08.876723 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:17:08 crc kubenswrapper[4693]: I1122 09:17:08.876992 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="dnsmasq-dns" containerID="cri-o://b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d" gracePeriod=10 Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.505688 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.110:5353: connect: connection refused" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.742643 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.756386 4693 generic.go:334] "Generic (PLEG): container finished" podID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerID="b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d" exitCode=0 Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.756423 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" event={"ID":"8932da5d-07b8-41e0-a4a9-45dfc65fdd54","Type":"ContainerDied","Data":"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d"} Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.756449 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" event={"ID":"8932da5d-07b8-41e0-a4a9-45dfc65fdd54","Type":"ContainerDied","Data":"af0f157a52a672b482564f95e6ef7153700e73a31c0cd7405d9f583994c2accb"} Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.756468 4693 scope.go:117] "RemoveContainer" containerID="b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.756493 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5c7b6b5695-z9tqf" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.775461 4693 scope.go:117] "RemoveContainer" containerID="696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.797199 4693 scope.go:117] "RemoveContainer" containerID="b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d" Nov 22 09:17:09 crc kubenswrapper[4693]: E1122 09:17:09.797637 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d\": container with ID starting with b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d not found: ID does not exist" containerID="b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.797672 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d"} err="failed to get container status \"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d\": rpc error: code = NotFound desc = could not find container \"b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d\": container with ID starting with b5284406a37419318cebc3caa86dc392a5a3817d8b07cd59755a862e19c41b8d not found: ID does not exist" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.797693 4693 scope.go:117] "RemoveContainer" containerID="696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7" Nov 22 09:17:09 crc kubenswrapper[4693]: E1122 09:17:09.798152 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7\": container with ID starting with 696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7 not found: ID does not exist" containerID="696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.798175 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7"} err="failed to get container status \"696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7\": rpc error: code = NotFound desc = could not find container \"696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7\": container with ID starting with 696c11e5d0b6089da16a210e386bc9e2adb734b8e64d3f8ea38ebc05764d23f7 not found: ID does not exist" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.875487 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb\") pod \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.875637 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb\") pod \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.875686 4693 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config\") pod \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.875717 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc\") pod \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.875869 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kqh9s\" (UniqueName: \"kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s\") pod \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\" (UID: \"8932da5d-07b8-41e0-a4a9-45dfc65fdd54\") " Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.883754 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s" (OuterVolumeSpecName: "kube-api-access-kqh9s") pod "8932da5d-07b8-41e0-a4a9-45dfc65fdd54" (UID: "8932da5d-07b8-41e0-a4a9-45dfc65fdd54"). InnerVolumeSpecName "kube-api-access-kqh9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.906289 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8932da5d-07b8-41e0-a4a9-45dfc65fdd54" (UID: "8932da5d-07b8-41e0-a4a9-45dfc65fdd54"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.906995 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8932da5d-07b8-41e0-a4a9-45dfc65fdd54" (UID: "8932da5d-07b8-41e0-a4a9-45dfc65fdd54"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.908567 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config" (OuterVolumeSpecName: "config") pod "8932da5d-07b8-41e0-a4a9-45dfc65fdd54" (UID: "8932da5d-07b8-41e0-a4a9-45dfc65fdd54"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.910316 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8932da5d-07b8-41e0-a4a9-45dfc65fdd54" (UID: "8932da5d-07b8-41e0-a4a9-45dfc65fdd54"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.977995 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.978019 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.978032 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.978040 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:09 crc kubenswrapper[4693]: I1122 09:17:09.978050 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kqh9s\" (UniqueName: \"kubernetes.io/projected/8932da5d-07b8-41e0-a4a9-45dfc65fdd54-kube-api-access-kqh9s\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:10 crc kubenswrapper[4693]: I1122 09:17:10.083541 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:17:10 crc kubenswrapper[4693]: I1122 09:17:10.092659 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5c7b6b5695-z9tqf"] Nov 22 09:17:10 crc kubenswrapper[4693]: I1122 09:17:10.156095 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" path="/var/lib/kubelet/pods/8932da5d-07b8-41e0-a4a9-45dfc65fdd54/volumes" Nov 22 09:17:10 crc kubenswrapper[4693]: I1122 09:17:10.764895 4693 generic.go:334] "Generic (PLEG): container finished" podID="23436668-ec00-4623-b854-aec77bf25158" containerID="1fed825f1f8c875b5a9d597ab4f50500eaf1da4a5352d6473eabae20878618c5" exitCode=0 Nov 22 09:17:10 crc kubenswrapper[4693]: I1122 09:17:10.764972 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p9zqf" event={"ID":"23436668-ec00-4623-b854-aec77bf25158","Type":"ContainerDied","Data":"1fed825f1f8c875b5a9d597ab4f50500eaf1da4a5352d6473eabae20878618c5"} Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.011087 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.112963 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xk9s\" (UniqueName: \"kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113013 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113040 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113092 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113112 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113140 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.113225 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle\") pod \"23436668-ec00-4623-b854-aec77bf25158\" (UID: \"23436668-ec00-4623-b854-aec77bf25158\") " Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.114518 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.115129 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.121272 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.122004 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s" (OuterVolumeSpecName: "kube-api-access-8xk9s") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "kube-api-access-8xk9s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.130119 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts" (OuterVolumeSpecName: "scripts") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.132007 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.132228 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "23436668-ec00-4623-b854-aec77bf25158" (UID: "23436668-ec00-4623-b854-aec77bf25158"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215262 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xk9s\" (UniqueName: \"kubernetes.io/projected/23436668-ec00-4623-b854-aec77bf25158-kube-api-access-8xk9s\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215315 4693 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-dispersionconf\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215337 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215345 4693 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-swiftconf\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215354 4693 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/23436668-ec00-4623-b854-aec77bf25158-ring-data-devices\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215362 4693 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/23436668-ec00-4623-b854-aec77bf25158-etc-swift\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.215371 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/23436668-ec00-4623-b854-aec77bf25158-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.779402 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-p9zqf" event={"ID":"23436668-ec00-4623-b854-aec77bf25158","Type":"ContainerDied","Data":"840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b"} Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.779445 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="840f497f845e963db56f9f0834dc0c01db182bbac2067c4d3e27f7a8652e971b" Nov 22 09:17:12 crc kubenswrapper[4693]: I1122 09:17:12.779465 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-p9zqf" Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.470879 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.476871 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/50bf6e78-87b2-416e-89b5-fa163645a184-etc-swift\") pod \"swift-storage-0\" (UID: \"50bf6e78-87b2-416e-89b5-fa163645a184\") " pod="openstack/swift-storage-0" Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.527620 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.797982 4693 generic.go:334] "Generic (PLEG): container finished" podID="99df5d88-540a-495c-a688-43f4d63ffa45" containerID="b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4" exitCode=0 Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.798059 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerDied","Data":"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4"} Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.799765 4693 generic.go:334] "Generic (PLEG): container finished" podID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerID="6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef" exitCode=0 Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.799827 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerDied","Data":"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef"} Nov 22 09:17:15 crc kubenswrapper[4693]: I1122 09:17:15.940641 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Nov 22 09:17:15 crc kubenswrapper[4693]: W1122 09:17:15.947151 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod50bf6e78_87b2_416e_89b5_fa163645a184.slice/crio-8dccf3293a129f1577423dfa1be2d83a4cb58ca9bab70e18a8cb25ad6577ea9a WatchSource:0}: Error finding container 8dccf3293a129f1577423dfa1be2d83a4cb58ca9bab70e18a8cb25ad6577ea9a: Status 404 returned error can't find the container with id 8dccf3293a129f1577423dfa1be2d83a4cb58ca9bab70e18a8cb25ad6577ea9a Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.807740 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"8dccf3293a129f1577423dfa1be2d83a4cb58ca9bab70e18a8cb25ad6577ea9a"} Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.810673 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerStarted","Data":"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d"} Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.810896 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.812171 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerStarted","Data":"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd"} Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.812666 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.829014 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=46.912174952 podStartE2EDuration="54.828996929s" podCreationTimestamp="2025-11-22 09:16:22 +0000 UTC" firstStartedPulling="2025-11-22 09:16:33.374338081 +0000 UTC m=+789.516840372" lastFinishedPulling="2025-11-22 09:16:41.291160059 +0000 UTC 
m=+797.433662349" observedRunningTime="2025-11-22 09:17:16.827891842 +0000 UTC m=+832.970394133" watchObservedRunningTime="2025-11-22 09:17:16.828996929 +0000 UTC m=+832.971499220" Nov 22 09:17:16 crc kubenswrapper[4693]: I1122 09:17:16.846812 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=46.399463731 podStartE2EDuration="54.846797076s" podCreationTimestamp="2025-11-22 09:16:22 +0000 UTC" firstStartedPulling="2025-11-22 09:16:32.843688994 +0000 UTC m=+788.986191285" lastFinishedPulling="2025-11-22 09:16:41.291022339 +0000 UTC m=+797.433524630" observedRunningTime="2025-11-22 09:17:16.843598955 +0000 UTC m=+832.986101246" watchObservedRunningTime="2025-11-22 09:17:16.846797076 +0000 UTC m=+832.989299357" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.552825 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-czsjw" podUID="6ebf2de1-2769-47bc-8136-4ff7460b89b1" containerName="ovn-controller" probeResult="failure" output=< Nov 22 09:17:17 crc kubenswrapper[4693]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Nov 22 09:17:17 crc kubenswrapper[4693]: > Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.564988 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.576237 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-xk5h2" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.761958 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-czsjw-config-m968d"] Nov 22 09:17:17 crc kubenswrapper[4693]: E1122 09:17:17.762253 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="23436668-ec00-4623-b854-aec77bf25158" containerName="swift-ring-rebalance" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762269 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="23436668-ec00-4623-b854-aec77bf25158" containerName="swift-ring-rebalance" Nov 22 09:17:17 crc kubenswrapper[4693]: E1122 09:17:17.762283 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="dnsmasq-dns" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762289 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="dnsmasq-dns" Nov 22 09:17:17 crc kubenswrapper[4693]: E1122 09:17:17.762304 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="init" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762310 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="init" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762437 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8932da5d-07b8-41e0-a4a9-45dfc65fdd54" containerName="dnsmasq-dns" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762449 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="23436668-ec00-4623-b854-aec77bf25158" containerName="swift-ring-rebalance" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.762917 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.764998 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.775419 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-czsjw-config-m968d"] Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.925906 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.925961 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.926031 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.926392 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4tzvv\" (UniqueName: \"kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.926482 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:17 crc kubenswrapper[4693]: I1122 09:17:17.926585 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.028949 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029000 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: 
\"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029040 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029066 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4tzvv\" (UniqueName: \"kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029105 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029135 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029414 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029438 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.029503 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.030004 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.032295 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.045150 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4tzvv\" (UniqueName: \"kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv\") pod \"ovn-controller-czsjw-config-m968d\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:18 crc kubenswrapper[4693]: I1122 09:17:18.078657 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.628633 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-czsjw-config-m968d"] Nov 22 09:17:21 crc kubenswrapper[4693]: W1122 09:17:21.638107 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec577a43_c0e0_4795_899c_7a1423d1a875.slice/crio-8a0f9ddc7c5dfe4372c7e215792c27ee43134ec07a5e60c785e9f8e8bc9572f9 WatchSource:0}: Error finding container 8a0f9ddc7c5dfe4372c7e215792c27ee43134ec07a5e60c785e9f8e8bc9572f9: Status 404 returned error can't find the container with id 8a0f9ddc7c5dfe4372c7e215792c27ee43134ec07a5e60c785e9f8e8bc9572f9 Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.850674 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5sz6k" event={"ID":"78b948e6-0096-4708-9bd3-74a2a8d7cc37","Type":"ContainerStarted","Data":"fa9499b404d2eec2239d43a6eca74d26c3746b0142792bc4b9f8d19eabc3f4ba"} Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.852081 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-czsjw-config-m968d" event={"ID":"ec577a43-c0e0-4795-899c-7a1423d1a875","Type":"ContainerStarted","Data":"4244c9d40cece41a21ba36545023fe7fc1475dc0974bd947d29d0b99e58b000e"} Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.852114 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-czsjw-config-m968d" event={"ID":"ec577a43-c0e0-4795-899c-7a1423d1a875","Type":"ContainerStarted","Data":"8a0f9ddc7c5dfe4372c7e215792c27ee43134ec07a5e60c785e9f8e8bc9572f9"} Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.871117 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-5sz6k" podStartSLOduration=1.680448469 podStartE2EDuration="14.871103799s" podCreationTimestamp="2025-11-22 09:17:07 +0000 UTC" firstStartedPulling="2025-11-22 09:17:08.122454646 +0000 UTC m=+824.264956937" lastFinishedPulling="2025-11-22 09:17:21.313109976 +0000 UTC m=+837.455612267" observedRunningTime="2025-11-22 09:17:21.867125401 +0000 UTC m=+838.009627692" watchObservedRunningTime="2025-11-22 09:17:21.871103799 +0000 UTC m=+838.013606090" Nov 22 09:17:21 crc kubenswrapper[4693]: I1122 09:17:21.885268 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-czsjw-config-m968d" podStartSLOduration=4.885253826 podStartE2EDuration="4.885253826s" podCreationTimestamp="2025-11-22 09:17:17 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:21.880693785 +0000 UTC 
m=+838.023196076" watchObservedRunningTime="2025-11-22 09:17:21.885253826 +0000 UTC m=+838.027756117" Nov 22 09:17:22 crc kubenswrapper[4693]: E1122 09:17:22.102131 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec577a43_c0e0_4795_899c_7a1423d1a875.slice/crio-4244c9d40cece41a21ba36545023fe7fc1475dc0974bd947d29d0b99e58b000e.scope\": RecentStats: unable to find data in memory cache]" Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.546280 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-czsjw" Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.858519 4693 generic.go:334] "Generic (PLEG): container finished" podID="ec577a43-c0e0-4795-899c-7a1423d1a875" containerID="4244c9d40cece41a21ba36545023fe7fc1475dc0974bd947d29d0b99e58b000e" exitCode=0 Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.858624 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-czsjw-config-m968d" event={"ID":"ec577a43-c0e0-4795-899c-7a1423d1a875","Type":"ContainerDied","Data":"4244c9d40cece41a21ba36545023fe7fc1475dc0974bd947d29d0b99e58b000e"} Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.861358 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"132636dcc6ab526d352028ff9c48a822bb2576d8569e394b6f3bf7de6ca28922"} Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.861389 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"82c89beaa6ff475dc7713ce4db13a45dbb139e9ed81e41caa21b650f20e03028"} Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.861400 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"be304abd83503289eac9e00e80d8088892400159881793c53c2c20e986c5d292"} Nov 22 09:17:22 crc kubenswrapper[4693]: I1122 09:17:22.861408 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"4b4ebdaa723cc0bd7d000c5abd5efcf8c44e253fd71aeed0225b3f162f3fd771"} Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.100963 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224764 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224839 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224872 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run" (OuterVolumeSpecName: "var-run") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224888 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224910 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.224925 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225038 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225091 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4tzvv\" (UniqueName: \"kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225117 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts\") pod \"ec577a43-c0e0-4795-899c-7a1423d1a875\" (UID: \"ec577a43-c0e0-4795-899c-7a1423d1a875\") " Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225482 4693 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225499 4693 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-log-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225508 4693 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/ec577a43-c0e0-4795-899c-7a1423d1a875-var-run-ovn\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.225950 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.226311 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts" (OuterVolumeSpecName: "scripts") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.238925 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv" (OuterVolumeSpecName: "kube-api-access-4tzvv") pod "ec577a43-c0e0-4795-899c-7a1423d1a875" (UID: "ec577a43-c0e0-4795-899c-7a1423d1a875"). InnerVolumeSpecName "kube-api-access-4tzvv". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.327175 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.327201 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4tzvv\" (UniqueName: \"kubernetes.io/projected/ec577a43-c0e0-4795-899c-7a1423d1a875-kube-api-access-4tzvv\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.327211 4693 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/ec577a43-c0e0-4795-899c-7a1423d1a875-additional-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.685993 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-czsjw-config-m968d"] Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.692328 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-czsjw-config-m968d"] Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.873583 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8a0f9ddc7c5dfe4372c7e215792c27ee43134ec07a5e60c785e9f8e8bc9572f9" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.873636 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-czsjw-config-m968d" Nov 22 09:17:24 crc kubenswrapper[4693]: I1122 09:17:24.876008 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"241f15f479406d9269de1044923b830ac6ab2c0c61f5b74b6e8a1975d5c077e6"} Nov 22 09:17:25 crc kubenswrapper[4693]: I1122 09:17:25.883814 4693 generic.go:334] "Generic (PLEG): container finished" podID="78b948e6-0096-4708-9bd3-74a2a8d7cc37" containerID="fa9499b404d2eec2239d43a6eca74d26c3746b0142792bc4b9f8d19eabc3f4ba" exitCode=0 Nov 22 09:17:25 crc kubenswrapper[4693]: I1122 09:17:25.883918 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5sz6k" event={"ID":"78b948e6-0096-4708-9bd3-74a2a8d7cc37","Type":"ContainerDied","Data":"fa9499b404d2eec2239d43a6eca74d26c3746b0142792bc4b9f8d19eabc3f4ba"} Nov 22 09:17:25 crc kubenswrapper[4693]: I1122 09:17:25.888571 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"20420acee215106918c56d99043a8c7937ffc5845741df14c635298ce374038f"} Nov 22 09:17:25 crc kubenswrapper[4693]: I1122 09:17:25.888598 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"d5fff3615e454eac379d29857c44290acad7f3cd5c093cbd9ccadb3a13994478"} Nov 22 09:17:25 crc kubenswrapper[4693]: I1122 09:17:25.888610 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"9f1097c24a4c9ed3bc62e8d41f157d60c6d392d825b25aa62889c4fcf0b70289"} Nov 22 09:17:26 crc kubenswrapper[4693]: I1122 09:17:26.154729 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="ec577a43-c0e0-4795-899c-7a1423d1a875" path="/var/lib/kubelet/pods/ec577a43-c0e0-4795-899c-7a1423d1a875/volumes" Nov 22 09:17:26 crc kubenswrapper[4693]: I1122 09:17:26.907752 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"93c154e7c8eb14c759c7cb5fee1235dcb4f9830f72236b99d3bac231c95befd9"} Nov 22 09:17:26 crc kubenswrapper[4693]: I1122 09:17:26.908014 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"69bc6c2dc59c31a3dc196a337265b9dd4103b22b0f5de8953ebe833b357c0ae2"} Nov 22 09:17:26 crc kubenswrapper[4693]: I1122 09:17:26.908028 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"9bf61eee1341f47a89d4ffe965a3256bdd227bcf920dd7805927fd87f33046bd"} Nov 22 09:17:26 crc kubenswrapper[4693]: I1122 09:17:26.908037 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"11121caa4515b97acfa1c3a0d6797ed8caf86e3471514461ddb5db49ecbe7c28"} Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.170361 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.364139 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle\") pod \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.364418 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data\") pod \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.364454 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data\") pod \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.364535 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jfnt6\" (UniqueName: \"kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6\") pod \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\" (UID: \"78b948e6-0096-4708-9bd3-74a2a8d7cc37\") " Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.368405 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "78b948e6-0096-4708-9bd3-74a2a8d7cc37" (UID: "78b948e6-0096-4708-9bd3-74a2a8d7cc37"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.368495 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6" (OuterVolumeSpecName: "kube-api-access-jfnt6") pod "78b948e6-0096-4708-9bd3-74a2a8d7cc37" (UID: "78b948e6-0096-4708-9bd3-74a2a8d7cc37"). InnerVolumeSpecName "kube-api-access-jfnt6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.381615 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "78b948e6-0096-4708-9bd3-74a2a8d7cc37" (UID: "78b948e6-0096-4708-9bd3-74a2a8d7cc37"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.393569 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data" (OuterVolumeSpecName: "config-data") pod "78b948e6-0096-4708-9bd3-74a2a8d7cc37" (UID: "78b948e6-0096-4708-9bd3-74a2a8d7cc37"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.466321 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.466348 4693 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.466361 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jfnt6\" (UniqueName: \"kubernetes.io/projected/78b948e6-0096-4708-9bd3-74a2a8d7cc37-kube-api-access-jfnt6\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.466370 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/78b948e6-0096-4708-9bd3-74a2a8d7cc37-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.914653 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-5sz6k" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.914668 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-5sz6k" event={"ID":"78b948e6-0096-4708-9bd3-74a2a8d7cc37","Type":"ContainerDied","Data":"4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a"} Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.914706 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b21237284bbff32551c73392b6be481155e5eeb4da7c2b06c7391b1b32d5d8a" Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.925216 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"d14471014797c9a611ca336ecd8d4698f07f235302ae5b249a298d31ee4c83f6"} Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.925246 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"fa8026b831d0a1439cb4710cea0a78874e41b2533805116d094b1cd37dc7baa0"} Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.925257 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"50bf6e78-87b2-416e-89b5-fa163645a184","Type":"ContainerStarted","Data":"2dc0bbc53555616249aaec557432afa06ef926859d13229a694e760a8f663c07"} Nov 22 09:17:27 crc kubenswrapper[4693]: I1122 09:17:27.950729 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=19.476548515 podStartE2EDuration="29.950713547s" podCreationTimestamp="2025-11-22 09:16:58 +0000 UTC" firstStartedPulling="2025-11-22 09:17:15.949203963 +0000 UTC m=+832.091706254" lastFinishedPulling="2025-11-22 09:17:26.423368996 +0000 UTC m=+842.565871286" observedRunningTime="2025-11-22 09:17:27.944960173 +0000 UTC m=+844.087462464" watchObservedRunningTime="2025-11-22 09:17:27.950713547 +0000 UTC m=+844.093215838" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.174455 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-jxgq8"] Nov 22 09:17:28 crc kubenswrapper[4693]: E1122 09:17:28.174954 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ec577a43-c0e0-4795-899c-7a1423d1a875" containerName="ovn-config" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.174971 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ec577a43-c0e0-4795-899c-7a1423d1a875" containerName="ovn-config" Nov 22 09:17:28 crc kubenswrapper[4693]: E1122 09:17:28.174994 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b948e6-0096-4708-9bd3-74a2a8d7cc37" containerName="glance-db-sync" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.175000 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b948e6-0096-4708-9bd3-74a2a8d7cc37" containerName="glance-db-sync" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.175142 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ec577a43-c0e0-4795-899c-7a1423d1a875" containerName="ovn-config" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.175194 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b948e6-0096-4708-9bd3-74a2a8d7cc37" containerName="glance-db-sync" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.175898 4693 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.178548 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.186996 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-jxgq8"] Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.216256 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-jxgq8"] Nov 22 09:17:28 crc kubenswrapper[4693]: E1122 09:17:28.216790 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[config dns-svc dns-swift-storage-0 kube-api-access-zf5rg ovsdbserver-nb ovsdbserver-sb], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" podUID="938b14dd-0137-4cf2-b289-7c0f5d88a9bd" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.239610 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.240820 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.259118 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276203 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276355 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276425 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276476 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276562 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.276656 
4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf5rg\" (UniqueName: \"kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.377985 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf5rg\" (UniqueName: \"kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378041 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378090 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378110 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378133 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmv74\" (UniqueName: \"kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378155 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378179 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378375 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " 
pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378471 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378503 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378547 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378582 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.378980 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.379009 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.379173 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.379254 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.379293 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.393151 
4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf5rg\" (UniqueName: \"kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg\") pod \"dnsmasq-dns-86c887b9fc-jxgq8\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479763 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479813 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479880 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479923 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479941 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmv74\" (UniqueName: \"kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.479966 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.480649 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.480686 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.480708 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" 
(UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.480725 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.480818 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.493405 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmv74\" (UniqueName: \"kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74\") pod \"dnsmasq-dns-cb6b7c77c-l62xw\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.552507 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.930709 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.939375 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:28 crc kubenswrapper[4693]: I1122 09:17:28.943061 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:17:28 crc kubenswrapper[4693]: W1122 09:17:28.946798 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod84631f2f_64de_4a1a_a4ba_6d19d02ccd52.slice/crio-a9b8ac7acea28d75b3faae56ebdd633fab127d33f67262a464376f306661c07f WatchSource:0}: Error finding container a9b8ac7acea28d75b3faae56ebdd633fab127d33f67262a464376f306661c07f: Status 404 returned error can't find the container with id a9b8ac7acea28d75b3faae56ebdd633fab127d33f67262a464376f306661c07f Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088297 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088359 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088426 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088503 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088529 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf5rg\" (UniqueName: \"kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088586 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config\") pod \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\" (UID: \"938b14dd-0137-4cf2-b289-7c0f5d88a9bd\") " Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088679 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088869 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088915 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.088986 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.089024 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.089114 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config" (OuterVolumeSpecName: "config") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.091880 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg" (OuterVolumeSpecName: "kube-api-access-zf5rg") pod "938b14dd-0137-4cf2-b289-7c0f5d88a9bd" (UID: "938b14dd-0137-4cf2-b289-7c0f5d88a9bd"). InnerVolumeSpecName "kube-api-access-zf5rg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.189738 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.189760 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.189770 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.189779 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.189787 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf5rg\" (UniqueName: \"kubernetes.io/projected/938b14dd-0137-4cf2-b289-7c0f5d88a9bd-kube-api-access-zf5rg\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.938159 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" event={"ID":"84631f2f-64de-4a1a-a4ba-6d19d02ccd52","Type":"ContainerStarted","Data":"a9b8ac7acea28d75b3faae56ebdd633fab127d33f67262a464376f306661c07f"} Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.938182 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-86c887b9fc-jxgq8" Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.971659 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-jxgq8"] Nov 22 09:17:29 crc kubenswrapper[4693]: I1122 09:17:29.975931 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86c887b9fc-jxgq8"] Nov 22 09:17:30 crc kubenswrapper[4693]: I1122 09:17:30.152986 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="938b14dd-0137-4cf2-b289-7c0f5d88a9bd" path="/var/lib/kubelet/pods/938b14dd-0137-4cf2-b289-7c0f5d88a9bd/volumes" Nov 22 09:17:31 crc kubenswrapper[4693]: I1122 09:17:31.950826 4693 generic.go:334] "Generic (PLEG): container finished" podID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerID="56510dbabbe3890b7ca9b3a324ec59009dc4ce9a9273645be92965e712e22d84" exitCode=0 Nov 22 09:17:31 crc kubenswrapper[4693]: I1122 09:17:31.950880 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" event={"ID":"84631f2f-64de-4a1a-a4ba-6d19d02ccd52","Type":"ContainerDied","Data":"56510dbabbe3890b7ca9b3a324ec59009dc4ce9a9273645be92965e712e22d84"} Nov 22 09:17:32 crc kubenswrapper[4693]: I1122 09:17:32.957502 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" event={"ID":"84631f2f-64de-4a1a-a4ba-6d19d02ccd52","Type":"ContainerStarted","Data":"5b1cbc94dc76ba2d10ec91115f5c08136a658184b45f4c9b28d0d0ce348cefc6"} Nov 22 09:17:32 crc kubenswrapper[4693]: I1122 09:17:32.958116 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:32 crc kubenswrapper[4693]: I1122 09:17:32.971205 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" podStartSLOduration=4.9711820509999995 podStartE2EDuration="4.971182051s" podCreationTimestamp="2025-11-22 09:17:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:32.968493434 +0000 UTC m=+849.110995725" watchObservedRunningTime="2025-11-22 09:17:32.971182051 +0000 UTC m=+849.113684343" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.353053 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.561071 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-mpg6l"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.561921 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.573069 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-mpg6l"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.601007 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.676509 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-2170-account-create-t2wl8"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.677358 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.684680 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.693275 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2170-account-create-t2wl8"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.751069 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.751131 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmx6z\" (UniqueName: \"kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.777709 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-7bss2"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.781242 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.790988 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7bss2"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.797962 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-b5a6-account-create-gb6vh"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.799040 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.802830 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.804766 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b5a6-account-create-gb6vh"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.852369 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.852418 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmx6z\" (UniqueName: \"kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.852454 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fls92\" (UniqueName: \"kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.852524 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.853042 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.857691 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-kc2bz"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.858661 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.862062 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-kc2bz"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.871431 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmx6z\" (UniqueName: \"kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z\") pod \"barbican-db-create-mpg6l\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.883034 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.915408 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-c6rqt"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.916464 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.918181 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.918217 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k575m" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.918405 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.918427 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.927434 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-c6rqt"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954202 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkwdm\" (UniqueName: \"kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954262 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fls92\" (UniqueName: \"kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954309 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954379 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954410 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9r8mk\" (UniqueName: \"kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954478 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts\") pod \"cinder-db-create-7bss2\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954502 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.954772 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4949v\" (UniqueName: \"kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v\") pod \"cinder-db-create-7bss2\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.955316 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.974295 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-178d-account-create-swm6h"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.975223 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.983179 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-178d-account-create-swm6h"] Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.985244 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fls92\" (UniqueName: \"kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92\") pod \"barbican-2170-account-create-t2wl8\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.985762 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Nov 22 09:17:33 crc kubenswrapper[4693]: I1122 09:17:33.990085 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.058950 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4949v\" (UniqueName: \"kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v\") pod \"cinder-db-create-7bss2\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059249 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkwdm\" (UniqueName: \"kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059284 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059324 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059352 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059372 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9r8mk\" (UniqueName: \"kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059404 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059420 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qnb9c\" (UniqueName: \"kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059438 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts\") pod \"cinder-db-create-7bss2\" (UID: 
\"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.059900 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.060419 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.061748 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts\") pod \"cinder-db-create-7bss2\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.089269 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4949v\" (UniqueName: \"kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v\") pod \"cinder-db-create-7bss2\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.091589 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9r8mk\" (UniqueName: \"kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk\") pod \"neutron-db-create-kc2bz\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.091755 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkwdm\" (UniqueName: \"kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm\") pod \"cinder-b5a6-account-create-gb6vh\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.093720 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.110963 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.153629 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-mpg6l"] Nov 22 09:17:34 crc kubenswrapper[4693]: W1122 09:17:34.157036 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod956e2855_d931_4003_b10c_ecb7712d4793.slice/crio-0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73 WatchSource:0}: Error finding container 0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73: Status 404 returned error can't find the container with id 0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.161208 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.161262 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5v49\" (UniqueName: \"kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.161289 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.161305 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qnb9c\" (UniqueName: \"kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.161381 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.168836 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.171879 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 
09:17:34.174873 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.181759 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qnb9c\" (UniqueName: \"kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c\") pod \"keystone-db-sync-c6rqt\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.262478 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.263117 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.263459 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5v49\" (UniqueName: \"kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.263821 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.285396 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5v49\" (UniqueName: \"kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49\") pod \"neutron-178d-account-create-swm6h\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.360144 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.457396 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-2170-account-create-t2wl8"] Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.551168 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-7bss2"] Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.575583 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-b5a6-account-create-gb6vh"] Nov 22 09:17:34 crc kubenswrapper[4693]: W1122 09:17:34.579802 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c63c124_8c2a_4bde_8a4d_50441a10ba02.slice/crio-cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f WatchSource:0}: Error finding container cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f: Status 404 returned error can't find the container with id cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.652723 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-kc2bz"] Nov 22 09:17:34 crc kubenswrapper[4693]: W1122 09:17:34.658768 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4b7bc687_9339_4ddd_a589_a23bb9880872.slice/crio-017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a WatchSource:0}: Error finding container 017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a: Status 404 returned error can't find the container with id 017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.725329 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-c6rqt"] Nov 22 09:17:34 crc kubenswrapper[4693]: W1122 09:17:34.726443 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0917b65_1f5c_4c04_8189_a070d8409c64.slice/crio-5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d WatchSource:0}: Error finding container 5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d: Status 404 returned error can't find the container with id 5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.818628 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-178d-account-create-swm6h"] Nov 22 09:17:34 crc kubenswrapper[4693]: W1122 09:17:34.846289 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd7451501_ca2e_42fc_884b_fc0ca2fa393e.slice/crio-fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a WatchSource:0}: Error finding container fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a: Status 404 returned error can't find the container with id fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.977627 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6rqt" event={"ID":"f0917b65-1f5c-4c04-8189-a070d8409c64","Type":"ContainerStarted","Data":"5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d"} Nov 22 09:17:34 crc 
kubenswrapper[4693]: I1122 09:17:34.978627 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-178d-account-create-swm6h" event={"ID":"d7451501-ca2e-42fc-884b-fc0ca2fa393e","Type":"ContainerStarted","Data":"fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.980193 4693 generic.go:334] "Generic (PLEG): container finished" podID="956e2855-d931-4003-b10c-ecb7712d4793" containerID="e6701e1c9e385cbf112da03e925b1750253f22899795ac22018c98e6baf1d609" exitCode=0 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.980223 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-mpg6l" event={"ID":"956e2855-d931-4003-b10c-ecb7712d4793","Type":"ContainerDied","Data":"e6701e1c9e385cbf112da03e925b1750253f22899795ac22018c98e6baf1d609"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.980258 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-mpg6l" event={"ID":"956e2855-d931-4003-b10c-ecb7712d4793","Type":"ContainerStarted","Data":"0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.981706 4693 generic.go:334] "Generic (PLEG): container finished" podID="9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" containerID="48d7f1b9410084beed5c4bfde3daa82178a1f319ffe68f5bb6aa4229e8c76d7d" exitCode=0 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.981773 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7bss2" event={"ID":"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207","Type":"ContainerDied","Data":"48d7f1b9410084beed5c4bfde3daa82178a1f319ffe68f5bb6aa4229e8c76d7d"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.981795 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7bss2" event={"ID":"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207","Type":"ContainerStarted","Data":"bd8d1ce188e4b4e8e799f1b061d328a17d61e1fbfa7378faab78b3feb987e88a"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.983063 4693 generic.go:334] "Generic (PLEG): container finished" podID="8c63c124-8c2a-4bde-8a4d-50441a10ba02" containerID="ef7fe073b128c6c52974d54cc3ff49bf804bf338d34a988f4f6f08bedd803563" exitCode=0 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.983134 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b5a6-account-create-gb6vh" event={"ID":"8c63c124-8c2a-4bde-8a4d-50441a10ba02","Type":"ContainerDied","Data":"ef7fe073b128c6c52974d54cc3ff49bf804bf338d34a988f4f6f08bedd803563"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.983159 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b5a6-account-create-gb6vh" event={"ID":"8c63c124-8c2a-4bde-8a4d-50441a10ba02","Type":"ContainerStarted","Data":"cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.984862 4693 generic.go:334] "Generic (PLEG): container finished" podID="4b7bc687-9339-4ddd-a589-a23bb9880872" containerID="2042e6179f43d538bc458776d10523f2ea26f48ae58965477d95716c30533a5f" exitCode=0 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.984927 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-kc2bz" event={"ID":"4b7bc687-9339-4ddd-a589-a23bb9880872","Type":"ContainerDied","Data":"2042e6179f43d538bc458776d10523f2ea26f48ae58965477d95716c30533a5f"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 
09:17:34.984949 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-kc2bz" event={"ID":"4b7bc687-9339-4ddd-a589-a23bb9880872","Type":"ContainerStarted","Data":"017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.986624 4693 generic.go:334] "Generic (PLEG): container finished" podID="c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" containerID="47a89b32958565c3f16483178caa6a9d530c2af9b2095036fcc883f93922d363" exitCode=0 Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.986720 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2170-account-create-t2wl8" event={"ID":"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e","Type":"ContainerDied","Data":"47a89b32958565c3f16483178caa6a9d530c2af9b2095036fcc883f93922d363"} Nov 22 09:17:34 crc kubenswrapper[4693]: I1122 09:17:34.986816 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2170-account-create-t2wl8" event={"ID":"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e","Type":"ContainerStarted","Data":"07fe3f62365a1d8c33bf30a3f5ae2dc6904c7d8398cb2fd81ba43037d0a223b9"} Nov 22 09:17:35 crc kubenswrapper[4693]: I1122 09:17:35.995100 4693 generic.go:334] "Generic (PLEG): container finished" podID="d7451501-ca2e-42fc-884b-fc0ca2fa393e" containerID="943305a5ec8698210df9f8229165fca8538eb6c11e6514a7cbd4be236ea344a1" exitCode=0 Nov 22 09:17:35 crc kubenswrapper[4693]: I1122 09:17:35.995195 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-178d-account-create-swm6h" event={"ID":"d7451501-ca2e-42fc-884b-fc0ca2fa393e","Type":"ContainerDied","Data":"943305a5ec8698210df9f8229165fca8538eb6c11e6514a7cbd4be236ea344a1"} Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.374871 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.497130 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts\") pod \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.497181 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkwdm\" (UniqueName: \"kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm\") pod \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\" (UID: \"8c63c124-8c2a-4bde-8a4d-50441a10ba02\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.497717 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8c63c124-8c2a-4bde-8a4d-50441a10ba02" (UID: "8c63c124-8c2a-4bde-8a4d-50441a10ba02"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.504169 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm" (OuterVolumeSpecName: "kube-api-access-xkwdm") pod "8c63c124-8c2a-4bde-8a4d-50441a10ba02" (UID: "8c63c124-8c2a-4bde-8a4d-50441a10ba02"). InnerVolumeSpecName "kube-api-access-xkwdm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.553055 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.557503 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.561231 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.565682 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.600059 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c63c124-8c2a-4bde-8a4d-50441a10ba02-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.600089 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkwdm\" (UniqueName: \"kubernetes.io/projected/8c63c124-8c2a-4bde-8a4d-50441a10ba02-kube-api-access-xkwdm\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701033 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4949v\" (UniqueName: \"kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v\") pod \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701068 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmx6z\" (UniqueName: \"kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z\") pod \"956e2855-d931-4003-b10c-ecb7712d4793\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701117 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts\") pod \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\" (UID: \"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701144 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fls92\" (UniqueName: \"kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92\") pod \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701205 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9r8mk\" (UniqueName: \"kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk\") pod \"4b7bc687-9339-4ddd-a589-a23bb9880872\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701247 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts\") pod \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\" (UID: \"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e\") " Nov 
22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701343 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts\") pod \"956e2855-d931-4003-b10c-ecb7712d4793\" (UID: \"956e2855-d931-4003-b10c-ecb7712d4793\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.701376 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts\") pod \"4b7bc687-9339-4ddd-a589-a23bb9880872\" (UID: \"4b7bc687-9339-4ddd-a589-a23bb9880872\") " Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.702335 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "956e2855-d931-4003-b10c-ecb7712d4793" (UID: "956e2855-d931-4003-b10c-ecb7712d4793"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.702343 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" (UID: "9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.702451 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" (UID: "c0410ad7-7eac-4d62-96e3-4dfa4c718f9e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.702734 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4b7bc687-9339-4ddd-a589-a23bb9880872" (UID: "4b7bc687-9339-4ddd-a589-a23bb9880872"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.704666 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk" (OuterVolumeSpecName: "kube-api-access-9r8mk") pod "4b7bc687-9339-4ddd-a589-a23bb9880872" (UID: "4b7bc687-9339-4ddd-a589-a23bb9880872"). InnerVolumeSpecName "kube-api-access-9r8mk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.705687 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z" (OuterVolumeSpecName: "kube-api-access-vmx6z") pod "956e2855-d931-4003-b10c-ecb7712d4793" (UID: "956e2855-d931-4003-b10c-ecb7712d4793"). InnerVolumeSpecName "kube-api-access-vmx6z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.705722 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v" (OuterVolumeSpecName: "kube-api-access-4949v") pod "9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" (UID: "9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207"). InnerVolumeSpecName "kube-api-access-4949v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.705795 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92" (OuterVolumeSpecName: "kube-api-access-fls92") pod "c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" (UID: "c0410ad7-7eac-4d62-96e3-4dfa4c718f9e"). InnerVolumeSpecName "kube-api-access-fls92". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803514 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/956e2855-d931-4003-b10c-ecb7712d4793-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803563 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4b7bc687-9339-4ddd-a589-a23bb9880872-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803573 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4949v\" (UniqueName: \"kubernetes.io/projected/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-kube-api-access-4949v\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803583 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmx6z\" (UniqueName: \"kubernetes.io/projected/956e2855-d931-4003-b10c-ecb7712d4793-kube-api-access-vmx6z\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803592 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803600 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fls92\" (UniqueName: \"kubernetes.io/projected/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-kube-api-access-fls92\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803609 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9r8mk\" (UniqueName: \"kubernetes.io/projected/4b7bc687-9339-4ddd-a589-a23bb9880872-kube-api-access-9r8mk\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:36 crc kubenswrapper[4693]: I1122 09:17:36.803617 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.003391 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-mpg6l" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.003450 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-mpg6l" event={"ID":"956e2855-d931-4003-b10c-ecb7712d4793","Type":"ContainerDied","Data":"0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73"} Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.003512 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0c42f01fea0e66c78b13d1c99ecc862e329f69d8b2f1cc814c490969dc5a9d73" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.005172 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-7bss2" event={"ID":"9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207","Type":"ContainerDied","Data":"bd8d1ce188e4b4e8e799f1b061d328a17d61e1fbfa7378faab78b3feb987e88a"} Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.005220 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd8d1ce188e4b4e8e799f1b061d328a17d61e1fbfa7378faab78b3feb987e88a" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.005411 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-7bss2" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.006397 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-b5a6-account-create-gb6vh" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.006397 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-b5a6-account-create-gb6vh" event={"ID":"8c63c124-8c2a-4bde-8a4d-50441a10ba02","Type":"ContainerDied","Data":"cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f"} Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.006449 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd3deab17f99ab32cbc0d9c9cb9cc9da079bb6c3288031afbe69db1b92905e4f" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.007734 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-kc2bz" event={"ID":"4b7bc687-9339-4ddd-a589-a23bb9880872","Type":"ContainerDied","Data":"017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a"} Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.007765 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-kc2bz" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.007765 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="017ae2d23a16a6d924d4f26b6f70ecfc673ea6b29a319a84157f8e8c5fa1281a" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.008789 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-2170-account-create-t2wl8" event={"ID":"c0410ad7-7eac-4d62-96e3-4dfa4c718f9e","Type":"ContainerDied","Data":"07fe3f62365a1d8c33bf30a3f5ae2dc6904c7d8398cb2fd81ba43037d0a223b9"} Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.008818 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="07fe3f62365a1d8c33bf30a3f5ae2dc6904c7d8398cb2fd81ba43037d0a223b9" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.008801 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-2170-account-create-t2wl8" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.213977 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.312364 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts\") pod \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.312580 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q5v49\" (UniqueName: \"kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49\") pod \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\" (UID: \"d7451501-ca2e-42fc-884b-fc0ca2fa393e\") " Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.313584 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d7451501-ca2e-42fc-884b-fc0ca2fa393e" (UID: "d7451501-ca2e-42fc-884b-fc0ca2fa393e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.319276 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49" (OuterVolumeSpecName: "kube-api-access-q5v49") pod "d7451501-ca2e-42fc-884b-fc0ca2fa393e" (UID: "d7451501-ca2e-42fc-884b-fc0ca2fa393e"). InnerVolumeSpecName "kube-api-access-q5v49". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.415493 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d7451501-ca2e-42fc-884b-fc0ca2fa393e-operator-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:37 crc kubenswrapper[4693]: I1122 09:17:37.415530 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q5v49\" (UniqueName: \"kubernetes.io/projected/d7451501-ca2e-42fc-884b-fc0ca2fa393e-kube-api-access-q5v49\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.018826 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-178d-account-create-swm6h" event={"ID":"d7451501-ca2e-42fc-884b-fc0ca2fa393e","Type":"ContainerDied","Data":"fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a"} Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.018878 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-178d-account-create-swm6h" Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.018889 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fd71d698b175bb3047c674c73d711cc72a15f1a4096fa5838a1ef94337faf22a" Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.554580 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.593832 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.594204 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="dnsmasq-dns" containerID="cri-o://78a60223ccc9e4edc2f9cacb551457f116229029fb8495ce70cb1607e236e96e" gracePeriod=10 Nov 22 09:17:38 crc kubenswrapper[4693]: I1122 09:17:38.837706 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.117:5353: connect: connection refused" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.028748 4693 generic.go:334] "Generic (PLEG): container finished" podID="e2244b23-21f6-4be8-a18d-9204f757709a" containerID="78a60223ccc9e4edc2f9cacb551457f116229029fb8495ce70cb1607e236e96e" exitCode=0 Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.028790 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" event={"ID":"e2244b23-21f6-4be8-a18d-9204f757709a","Type":"ContainerDied","Data":"78a60223ccc9e4edc2f9cacb551457f116229029fb8495ce70cb1607e236e96e"} Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.675949 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.753140 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc\") pod \"e2244b23-21f6-4be8-a18d-9204f757709a\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.787598 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e2244b23-21f6-4be8-a18d-9204f757709a" (UID: "e2244b23-21f6-4be8-a18d-9204f757709a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.854062 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb\") pod \"e2244b23-21f6-4be8-a18d-9204f757709a\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.854186 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb\") pod \"e2244b23-21f6-4be8-a18d-9204f757709a\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.854211 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cj8x6\" (UniqueName: \"kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6\") pod \"e2244b23-21f6-4be8-a18d-9204f757709a\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.854229 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config\") pod \"e2244b23-21f6-4be8-a18d-9204f757709a\" (UID: \"e2244b23-21f6-4be8-a18d-9204f757709a\") " Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.854407 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.857158 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6" (OuterVolumeSpecName: "kube-api-access-cj8x6") pod "e2244b23-21f6-4be8-a18d-9204f757709a" (UID: "e2244b23-21f6-4be8-a18d-9204f757709a"). InnerVolumeSpecName "kube-api-access-cj8x6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.882128 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e2244b23-21f6-4be8-a18d-9204f757709a" (UID: "e2244b23-21f6-4be8-a18d-9204f757709a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.888438 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e2244b23-21f6-4be8-a18d-9204f757709a" (UID: "e2244b23-21f6-4be8-a18d-9204f757709a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.889221 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config" (OuterVolumeSpecName: "config") pod "e2244b23-21f6-4be8-a18d-9204f757709a" (UID: "e2244b23-21f6-4be8-a18d-9204f757709a"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.955280 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.955309 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cj8x6\" (UniqueName: \"kubernetes.io/projected/e2244b23-21f6-4be8-a18d-9204f757709a-kube-api-access-cj8x6\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.955323 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:39 crc kubenswrapper[4693]: I1122 09:17:39.955331 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e2244b23-21f6-4be8-a18d-9204f757709a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.035342 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6rqt" event={"ID":"f0917b65-1f5c-4c04-8189-a070d8409c64","Type":"ContainerStarted","Data":"ac54efd1cae29da2ce5c5ce4eddff8bd6b7ea698aef39cd58a921cf312509c04"} Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.038384 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" event={"ID":"e2244b23-21f6-4be8-a18d-9204f757709a","Type":"ContainerDied","Data":"d9d2cb59b5fa514bb49767ef7d39534b78715fe1b712338352d91c82d33a8f69"} Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.038416 4693 scope.go:117] "RemoveContainer" containerID="78a60223ccc9e4edc2f9cacb551457f116229029fb8495ce70cb1607e236e96e" Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.038502 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cf8bcbfcf-cnw8s" Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.062885 4693 scope.go:117] "RemoveContainer" containerID="6f8bc043c6cc8e7e66316de4c0e84dc34dd644896314cdeefb583fecaabff998" Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.075289 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-c6rqt" podStartSLOduration=2.332324347 podStartE2EDuration="7.075279373s" podCreationTimestamp="2025-11-22 09:17:33 +0000 UTC" firstStartedPulling="2025-11-22 09:17:34.728940174 +0000 UTC m=+850.871442466" lastFinishedPulling="2025-11-22 09:17:39.471895202 +0000 UTC m=+855.614397492" observedRunningTime="2025-11-22 09:17:40.061481563 +0000 UTC m=+856.203983854" watchObservedRunningTime="2025-11-22 09:17:40.075279373 +0000 UTC m=+856.217781664" Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.077528 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.081720 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cf8bcbfcf-cnw8s"] Nov 22 09:17:40 crc kubenswrapper[4693]: I1122 09:17:40.155257 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" path="/var/lib/kubelet/pods/e2244b23-21f6-4be8-a18d-9204f757709a/volumes" Nov 22 09:17:42 crc kubenswrapper[4693]: I1122 09:17:42.053303 4693 generic.go:334] "Generic (PLEG): container finished" podID="f0917b65-1f5c-4c04-8189-a070d8409c64" containerID="ac54efd1cae29da2ce5c5ce4eddff8bd6b7ea698aef39cd58a921cf312509c04" exitCode=0 Nov 22 09:17:42 crc kubenswrapper[4693]: I1122 09:17:42.053403 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6rqt" event={"ID":"f0917b65-1f5c-4c04-8189-a070d8409c64","Type":"ContainerDied","Data":"ac54efd1cae29da2ce5c5ce4eddff8bd6b7ea698aef39cd58a921cf312509c04"} Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.332265 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.414517 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data\") pod \"f0917b65-1f5c-4c04-8189-a070d8409c64\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.414750 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle\") pod \"f0917b65-1f5c-4c04-8189-a070d8409c64\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.414786 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qnb9c\" (UniqueName: \"kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c\") pod \"f0917b65-1f5c-4c04-8189-a070d8409c64\" (UID: \"f0917b65-1f5c-4c04-8189-a070d8409c64\") " Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.421970 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c" (OuterVolumeSpecName: "kube-api-access-qnb9c") pod "f0917b65-1f5c-4c04-8189-a070d8409c64" (UID: "f0917b65-1f5c-4c04-8189-a070d8409c64"). InnerVolumeSpecName "kube-api-access-qnb9c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.436339 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f0917b65-1f5c-4c04-8189-a070d8409c64" (UID: "f0917b65-1f5c-4c04-8189-a070d8409c64"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.449699 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data" (OuterVolumeSpecName: "config-data") pod "f0917b65-1f5c-4c04-8189-a070d8409c64" (UID: "f0917b65-1f5c-4c04-8189-a070d8409c64"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.517098 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.517143 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0917b65-1f5c-4c04-8189-a070d8409c64-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:43 crc kubenswrapper[4693]: I1122 09:17:43.517157 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qnb9c\" (UniqueName: \"kubernetes.io/projected/f0917b65-1f5c-4c04-8189-a070d8409c64-kube-api-access-qnb9c\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.069900 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-c6rqt" event={"ID":"f0917b65-1f5c-4c04-8189-a070d8409c64","Type":"ContainerDied","Data":"5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d"} Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.069944 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5d8fcbdd8bac4fe7b284deef7d5d345f241513a756464dff7272f75151fc8f4d" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.070000 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-c6rqt" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287513 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287792 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d7451501-ca2e-42fc-884b-fc0ca2fa393e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287811 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d7451501-ca2e-42fc-884b-fc0ca2fa393e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287820 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="init" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287826 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="init" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287856 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b7bc687-9339-4ddd-a589-a23bb9880872" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287862 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b7bc687-9339-4ddd-a589-a23bb9880872" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287876 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="dnsmasq-dns" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287882 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="dnsmasq-dns" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287893 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c63c124-8c2a-4bde-8a4d-50441a10ba02" containerName="mariadb-account-create" Nov 22 09:17:44 
crc kubenswrapper[4693]: I1122 09:17:44.287898 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c63c124-8c2a-4bde-8a4d-50441a10ba02" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287905 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287911 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287920 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0917b65-1f5c-4c04-8189-a070d8409c64" containerName="keystone-db-sync" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287925 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0917b65-1f5c-4c04-8189-a070d8409c64" containerName="keystone-db-sync" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287936 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287941 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: E1122 09:17:44.287948 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="956e2855-d931-4003-b10c-ecb7712d4793" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.287954 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="956e2855-d931-4003-b10c-ecb7712d4793" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288071 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288083 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c63c124-8c2a-4bde-8a4d-50441a10ba02" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288143 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b7bc687-9339-4ddd-a589-a23bb9880872" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288152 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="956e2855-d931-4003-b10c-ecb7712d4793" containerName="mariadb-database-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288162 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e2244b23-21f6-4be8-a18d-9204f757709a" containerName="dnsmasq-dns" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288173 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0917b65-1f5c-4c04-8189-a070d8409c64" containerName="keystone-db-sync" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288180 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d7451501-ca2e-42fc-884b-fc0ca2fa393e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.288186 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" containerName="mariadb-account-create" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.289048 4693 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.306688 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.348723 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-22fj8"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.356789 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.361554 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.361702 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.361803 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.361981 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k575m" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.362322 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.370708 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-22fj8"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430637 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8dnk\" (UniqueName: \"kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430679 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430725 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430760 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430800 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: 
\"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.430819 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.497179 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6bb9684445-rmchz"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.500796 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.507567 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.508019 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.508301 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.508540 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-z6p77" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559777 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559831 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559902 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559945 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8dnk\" (UniqueName: \"kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559967 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.559987 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560017 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560051 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lg8wm\" (UniqueName: \"kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560078 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560098 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560117 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.560666 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6bb9684445-rmchz"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.561171 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.561944 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " 
pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.563534 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.563713 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.567034 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.585021 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-ccc76"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.586148 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.599231 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8dnk\" (UniqueName: \"kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk\") pod \"dnsmasq-dns-5cd88b5f5c-rf9vc\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.599618 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.599933 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-cjrhh" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.600054 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.606798 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-ccc76"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.608665 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.628593 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-qjqt5"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.629637 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.635048 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nt99g" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.635173 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.635353 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.649957 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qjqt5"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662016 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lg8wm\" (UniqueName: \"kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662065 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662111 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662146 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662177 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662207 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662227 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662245 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662267 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662300 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.662342 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8574h\" (UniqueName: \"kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.679564 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.682321 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.683959 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.695181 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.697514 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.698738 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.703151 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.703406 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.708807 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lg8wm\" (UniqueName: \"kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm\") pod \"keystone-bootstrap-22fj8\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.740588 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-hvt8s"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.741831 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.747330 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bhk4q" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.752433 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764080 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764135 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8574h\" (UniqueName: \"kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764155 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qg4fm\" (UniqueName: \"kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764204 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764240 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764276 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764298 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764334 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764361 4693 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764381 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764401 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764420 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764441 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764467 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764489 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q2kwg\" (UniqueName: \"kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764509 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764528 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764556 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sd9qh\" (UniqueName: \"kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.764571 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.770760 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.770962 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.771021 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.778765 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.788322 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.790326 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.797639 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8574h\" (UniqueName: \"kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h\") pod \"horizon-6bb9684445-rmchz\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.797866 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.797958 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.798259 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.798435 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cwf55" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.809148 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hvt8s"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.832501 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.861031 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866176 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866238 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866266 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866290 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866324 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q2kwg\" (UniqueName: \"kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " 
pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866347 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866363 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psdtp\" (UniqueName: \"kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866390 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866409 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sd9qh\" (UniqueName: \"kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866426 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866448 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866469 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qg4fm\" (UniqueName: \"kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866494 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866528 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866556 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866587 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.866609 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.868981 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.870006 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.873625 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.874707 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.875005 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.875721 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.876187 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.876688 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.881943 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.884500 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.884919 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.885605 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.887039 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.890783 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.890859 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.891102 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.897656 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q2kwg\" (UniqueName: \"kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg\") pod \"horizon-6c46c9cb9c-wcdw2\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.898971 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qg4fm\" (UniqueName: \"kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm\") pod \"cinder-db-sync-qjqt5\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") " pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.903446 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sd9qh\" (UniqueName: \"kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh\") pod \"neutron-db-sync-ccc76\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.928120 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/ceilometer-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.934617 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-fhrhl"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.935977 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.939811 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mjz8r" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.940302 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.940614 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.944237 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fhrhl"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.948448 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.950821 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.956953 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.959756 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.961008 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.963936 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.964060 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.966873 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.967737 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.967770 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968282 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " 
pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968351 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968411 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968485 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968525 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968577 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968601 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968626 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psdtp\" (UniqueName: \"kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968654 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968676 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 
22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968694 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqfwf\" (UniqueName: \"kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968727 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f6tgh\" (UniqueName: \"kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968742 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968771 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.968792 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.971329 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.973020 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.975032 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.976251 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:44 crc kubenswrapper[4693]: I1122 09:17:44.986225 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psdtp\" (UniqueName: \"kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp\") pod \"barbican-db-sync-hvt8s\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.096509 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099571 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099613 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099635 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099651 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099674 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppvln\" (UniqueName: \"kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099691 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099711 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099729 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqfwf\" (UniqueName: 
\"kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099747 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f6tgh\" (UniqueName: \"kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099774 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099790 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099805 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099821 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.099836 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104033 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104117 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104203 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " 
pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104282 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knn99\" (UniqueName: \"kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104347 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104414 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104479 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104554 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104625 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104697 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104756 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104828 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " 
pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.104935 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gc8rm\" (UniqueName: \"kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.107880 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.107972 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.108034 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.108111 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.108187 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.108281 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.108352 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.109459 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 
crc kubenswrapper[4693]: I1122 09:17:45.109661 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.111221 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.114232 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.128424 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qjqt5" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.135539 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.136072 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.136533 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.148574 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.149659 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.152093 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.155993 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts\") pod \"glance-default-external-api-0\" (UID: 
\"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.161818 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.172651 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqfwf\" (UniqueName: \"kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.173079 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.173669 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f6tgh\" (UniqueName: \"kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.173721 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") " pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.174430 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.223974 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224020 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224140 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224163 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts\") pod \"glance-default-internal-api-0\" (UID: 
\"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224191 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knn99\" (UniqueName: \"kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224240 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224271 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224292 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224343 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224356 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224418 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gc8rm\" (UniqueName: \"kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224432 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224469 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 
09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224515 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224543 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224605 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224681 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224716 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppvln\" (UniqueName: \"kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.224731 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.225445 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.225687 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.228510 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.228886 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.228977 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.229102 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.229169 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.229719 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.231769 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.239645 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.245018 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.248407 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.248464 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.255003 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.270812 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppvln\" (UniqueName: \"kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.277413 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.284539 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knn99\" (UniqueName: \"kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99\") pod \"dnsmasq-dns-798745f775-l9pkd\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") " pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.286524 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gc8rm\" (UniqueName: \"kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.289742 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.294044 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.294898 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data\") pod \"placement-db-sync-fhrhl\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.296721 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.323694 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6bb9684445-rmchz"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.324210 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.483238 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.551919 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fhrhl" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.577708 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.591971 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.675769 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-22fj8"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.792225 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-ccc76"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.864000 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.886946 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-hvt8s"] Nov 22 09:17:45 crc kubenswrapper[4693]: W1122 09:17:45.896637 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod079d82bf_3312_4f7f_8ef8_6d55b20c8f5d.slice/crio-38fc368c6834eaa71626b74a4acf077e8c8881fea8ee04f37776ac1fb11a3ae2 WatchSource:0}: Error finding container 38fc368c6834eaa71626b74a4acf077e8c8881fea8ee04f37776ac1fb11a3ae2: Status 404 returned error can't find the container with id 38fc368c6834eaa71626b74a4acf077e8c8881fea8ee04f37776ac1fb11a3ae2 Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.904762 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:17:45 crc kubenswrapper[4693]: I1122 09:17:45.928552 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-qjqt5"] Nov 22 09:17:45 crc kubenswrapper[4693]: W1122 09:17:45.931329 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbe72ccb6_821d_4b5a_a4d0_6d866fc617f6.slice/crio-00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa WatchSource:0}: Error finding container 00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa: Status 404 returned error can't find the container with id 00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa Nov 
22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.114738 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.130228 4693 generic.go:334] "Generic (PLEG): container finished" podID="8ea632be-fc87-4e83-85b3-b52e3ca01560" containerID="9bdb00c3d162a2ae6a2102548d1f9149ab49cdb4195d7e25595ec9eefba6e30b" exitCode=0
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.130293 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" event={"ID":"8ea632be-fc87-4e83-85b3-b52e3ca01560","Type":"ContainerDied","Data":"9bdb00c3d162a2ae6a2102548d1f9149ab49cdb4195d7e25595ec9eefba6e30b"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.130323 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" event={"ID":"8ea632be-fc87-4e83-85b3-b52e3ca01560","Type":"ContainerStarted","Data":"27043f1ffab157fe3e09d832f47c448be5d53a2e86f60aa1bce7b3854d4046bd"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.133376 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.138184 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerStarted","Data":"38fc368c6834eaa71626b74a4acf077e8c8881fea8ee04f37776ac1fb11a3ae2"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.141647 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-fhrhl"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.147263 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qjqt5" event={"ID":"be72ccb6-821d-4b5a-a4d0-6d866fc617f6","Type":"ContainerStarted","Data":"00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.183462 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-22fj8" podStartSLOduration=2.183445576 podStartE2EDuration="2.183445576s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:46.17118125 +0000 UTC m=+862.313683541" watchObservedRunningTime="2025-11-22 09:17:46.183445576 +0000 UTC m=+862.325947868"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.195673 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerStarted","Data":"a49323af211301011c735148594b6765b684dbb29975a1f98d8478c50e5a138b"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.195701 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22fj8" event={"ID":"9b1d157d-be5c-47fc-9325-20373433ad4c","Type":"ContainerStarted","Data":"66223a546c5fee881b8313aef5aa8941b7f04f1e366528d6eea707ad71ea476d"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.195716 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22fj8" event={"ID":"9b1d157d-be5c-47fc-9325-20373433ad4c","Type":"ContainerStarted","Data":"99ef60a40659f344091a54dd905b5f73eb9c5ad465c57e5483bc39f54409d5eb"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.195726 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hvt8s" event={"ID":"c37ecde5-29e1-4377-95be-2b7da9e65110","Type":"ContainerStarted","Data":"a99ecfb27d1d44b34ab97809d490af7f5072bb14e9f1903df2643685e9defc0a"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.195735 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerStarted","Data":"bfeab9714cc74f87e851852771321b417347a39b0a93500c4f97e284c37d5dca"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.205523 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ccc76" event={"ID":"977876f3-91a6-4f81-9c62-b7d5fc9b1508","Type":"ContainerStarted","Data":"2d028d413186bba2df087c41e44cfb7465dc826d0fa34308f732a16b3953bb35"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.205575 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ccc76" event={"ID":"977876f3-91a6-4f81-9c62-b7d5fc9b1508","Type":"ContainerStarted","Data":"fa0e742ccc721bec65e8465ea5f17ed2872956e9ce35da5dedaf653a554fc05d"}
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.228863 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.229632 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-ccc76" podStartSLOduration=2.229613338 podStartE2EDuration="2.229613338s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:46.222065852 +0000 UTC m=+862.364568143" watchObservedRunningTime="2025-11-22 09:17:46.229613338 +0000 UTC m=+862.372115628"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.556769 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.619562 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6bb9684445-rmchz"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.647932 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.650157 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.684901 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.694890 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.770737 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.770781 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.770805 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.770873 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.770961 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9pwk\" (UniqueName: \"kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.819457 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.872500 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.872589 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk"
Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.872708 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: 
\"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.873286 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9pwk\" (UniqueName: \"kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.873478 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.874395 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.875354 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.879960 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.881157 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.894396 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9pwk\" (UniqueName: \"kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk\") pod \"horizon-c9ccb65bf-fw9qk\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:46 crc kubenswrapper[4693]: I1122 09:17:46.978427 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.036306 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.178618 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.178788 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.178831 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.178939 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.179000 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8dnk\" (UniqueName: \"kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.179028 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb\") pod \"8ea632be-fc87-4e83-85b3-b52e3ca01560\" (UID: \"8ea632be-fc87-4e83-85b3-b52e3ca01560\") " Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.186143 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk" (OuterVolumeSpecName: "kube-api-access-z8dnk") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "kube-api-access-z8dnk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.197584 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.204471 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config" (OuterVolumeSpecName: "config") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.209311 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.220211 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.227812 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerStarted","Data":"a02f73e78109482178f60db50f8118496e581113f949dccc3005c68c3c58a805"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.227910 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerStarted","Data":"caab0acb9c763b7ab1ed95c37516f84558d680968f0c70a4d66d494bda1683a0"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.233170 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerStarted","Data":"394900ad6f0c2f7749676037a3b4c4b6c6f1cb82b906aa76fb557a3bea1a427b"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.233207 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerStarted","Data":"0901b365571b723b58f1b907790aec57f7a11a58d7133336308d33592cc8532b"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.235477 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fhrhl" event={"ID":"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9","Type":"ContainerStarted","Data":"6ad6919e0ae24ac05c67aba1df8fcf3b3cb469d571c4cf8d228565f320003e60"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.243282 4693 generic.go:334] "Generic (PLEG): container finished" podID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerID="ab5b2db282e5b8a4e86e7ad05df8c13cf4b68c11b7848d559a1069d101f9975a" exitCode=0 Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.243408 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-l9pkd" event={"ID":"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6","Type":"ContainerDied","Data":"ab5b2db282e5b8a4e86e7ad05df8c13cf4b68c11b7848d559a1069d101f9975a"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.243440 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-l9pkd" event={"ID":"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6","Type":"ContainerStarted","Data":"a02751ac6e7446db3b755770c00c3c26236d34605f3ccff905b8ba6679460fa9"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.247911 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8ea632be-fc87-4e83-85b3-b52e3ca01560" (UID: "8ea632be-fc87-4e83-85b3-b52e3ca01560"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.252888 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" event={"ID":"8ea632be-fc87-4e83-85b3-b52e3ca01560","Type":"ContainerDied","Data":"27043f1ffab157fe3e09d832f47c448be5d53a2e86f60aa1bce7b3854d4046bd"} Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.252936 4693 scope.go:117] "RemoveContainer" containerID="9bdb00c3d162a2ae6a2102548d1f9149ab49cdb4195d7e25595ec9eefba6e30b" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.252895 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5cd88b5f5c-rf9vc" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.281435 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.282728 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.282829 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.282906 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8dnk\" (UniqueName: \"kubernetes.io/projected/8ea632be-fc87-4e83-85b3-b52e3ca01560-kube-api-access-z8dnk\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.283000 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.283075 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8ea632be-fc87-4e83-85b3-b52e3ca01560-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.314491 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.320476 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5cd88b5f5c-rf9vc"] Nov 22 09:17:47 crc kubenswrapper[4693]: I1122 09:17:47.423403 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"] Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.163583 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ea632be-fc87-4e83-85b3-b52e3ca01560" path="/var/lib/kubelet/pods/8ea632be-fc87-4e83-85b3-b52e3ca01560/volumes" Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.276714 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerStarted","Data":"99dc93d52dfd369610ffb5620be48acd5549089fe21a2048bb5f6f1cd4801ed7"} Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.276875 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-log" containerID="cri-o://394900ad6f0c2f7749676037a3b4c4b6c6f1cb82b906aa76fb557a3bea1a427b" gracePeriod=30 Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.277375 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-httpd" containerID="cri-o://99dc93d52dfd369610ffb5620be48acd5549089fe21a2048bb5f6f1cd4801ed7" gracePeriod=30 Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.279012 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerStarted","Data":"1b0af9707d435c7cba80fc1f90c94ced33bba99a3725e4881f7ef0e9601a51bd"} Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.283490 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-l9pkd" event={"ID":"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6","Type":"ContainerStarted","Data":"129172c7678c6e9585e9c7b29a6bb271ef2b812be58f7843dc66eedfd7309456"} Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.284348 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.296821 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerStarted","Data":"3ca8623712c74f77d13feb3325edf6951dd01ae9211ae9737235040c515ef80d"} Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.296918 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-log" containerID="cri-o://a02f73e78109482178f60db50f8118496e581113f949dccc3005c68c3c58a805" gracePeriod=30 Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.297109 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-httpd" containerID="cri-o://3ca8623712c74f77d13feb3325edf6951dd01ae9211ae9737235040c515ef80d" gracePeriod=30 Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.297590 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.297576792 podStartE2EDuration="4.297576792s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:48.291121392 +0000 UTC m=+864.433623683" watchObservedRunningTime="2025-11-22 09:17:48.297576792 +0000 UTC m=+864.440079083" Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.319686 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-798745f775-l9pkd" podStartSLOduration=4.319670987 podStartE2EDuration="4.319670987s" 
podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:48.315665464 +0000 UTC m=+864.458167754" watchObservedRunningTime="2025-11-22 09:17:48.319670987 +0000 UTC m=+864.462173278" Nov 22 09:17:48 crc kubenswrapper[4693]: I1122 09:17:48.348766 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.348742933 podStartE2EDuration="4.348742933s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:17:48.337348855 +0000 UTC m=+864.479851146" watchObservedRunningTime="2025-11-22 09:17:48.348742933 +0000 UTC m=+864.491245225" Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.308654 4693 generic.go:334] "Generic (PLEG): container finished" podID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerID="99dc93d52dfd369610ffb5620be48acd5549089fe21a2048bb5f6f1cd4801ed7" exitCode=0 Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.308903 4693 generic.go:334] "Generic (PLEG): container finished" podID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerID="394900ad6f0c2f7749676037a3b4c4b6c6f1cb82b906aa76fb557a3bea1a427b" exitCode=143 Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.308945 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerDied","Data":"99dc93d52dfd369610ffb5620be48acd5549089fe21a2048bb5f6f1cd4801ed7"} Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.308970 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerDied","Data":"394900ad6f0c2f7749676037a3b4c4b6c6f1cb82b906aa76fb557a3bea1a427b"} Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.316073 4693 generic.go:334] "Generic (PLEG): container finished" podID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerID="3ca8623712c74f77d13feb3325edf6951dd01ae9211ae9737235040c515ef80d" exitCode=0 Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.316213 4693 generic.go:334] "Generic (PLEG): container finished" podID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerID="a02f73e78109482178f60db50f8118496e581113f949dccc3005c68c3c58a805" exitCode=143 Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.317022 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerDied","Data":"3ca8623712c74f77d13feb3325edf6951dd01ae9211ae9737235040c515ef80d"} Nov 22 09:17:49 crc kubenswrapper[4693]: I1122 09:17:49.317044 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerDied","Data":"a02f73e78109482178f60db50f8118496e581113f949dccc3005c68c3c58a805"} Nov 22 09:17:50 crc kubenswrapper[4693]: I1122 09:17:50.330008 4693 generic.go:334] "Generic (PLEG): container finished" podID="9b1d157d-be5c-47fc-9325-20373433ad4c" containerID="66223a546c5fee881b8313aef5aa8941b7f04f1e366528d6eea707ad71ea476d" exitCode=0 Nov 22 09:17:50 crc kubenswrapper[4693]: I1122 09:17:50.330096 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/keystone-bootstrap-22fj8" event={"ID":"9b1d157d-be5c-47fc-9325-20373433ad4c","Type":"ContainerDied","Data":"66223a546c5fee881b8313aef5aa8941b7f04f1e366528d6eea707ad71ea476d"} Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.319613 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.354920 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"] Nov 22 09:17:53 crc kubenswrapper[4693]: E1122 09:17:53.355273 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ea632be-fc87-4e83-85b3-b52e3ca01560" containerName="init" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.355289 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ea632be-fc87-4e83-85b3-b52e3ca01560" containerName="init" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.355562 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ea632be-fc87-4e83-85b3-b52e3ca01560" containerName="init" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.356812 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.360736 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.366125 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"] Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.448078 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"] Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.471804 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-7fd67558f8-nzmxr"] Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.477191 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.484407 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7fd67558f8-nzmxr"] Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.511992 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.512255 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.512363 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.512466 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9kd4\" (UniqueName: \"kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.512923 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.513292 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.513411 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-secret-key\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618446 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-tls-certs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618481 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618515 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-config-data\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618559 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618576 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618606 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9kd4\" (UniqueName: \"kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618640 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4dbs\" (UniqueName: \"kubernetes.io/projected/dd3b9780-060c-4788-9800-20c1ac3b2e95-kube-api-access-h4dbs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618679 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618696 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-combined-ca-bundle\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618720 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618742 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618764 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-scripts\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.618784 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd3b9780-060c-4788-9800-20c1ac3b2e95-logs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.619492 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.620464 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.621471 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.629484 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.629497 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.631798 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9kd4\" (UniqueName: \"kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4\") pod \"horizon-67dc677d8b-gqzpn\" (UID: 
\"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.637113 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key\") pod \"horizon-67dc677d8b-gqzpn\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") " pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.693765 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720402 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-combined-ca-bundle\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720477 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-scripts\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720508 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd3b9780-060c-4788-9800-20c1ac3b2e95-logs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720568 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-secret-key\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720593 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-tls-certs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720662 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-config-data\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.720799 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4dbs\" (UniqueName: \"kubernetes.io/projected/dd3b9780-060c-4788-9800-20c1ac3b2e95-kube-api-access-h4dbs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.723233 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dd3b9780-060c-4788-9800-20c1ac3b2e95-logs\") pod 
\"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.723429 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-scripts\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.723458 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-combined-ca-bundle\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.723718 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/dd3b9780-060c-4788-9800-20c1ac3b2e95-config-data\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.724903 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-tls-certs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.725710 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/dd3b9780-060c-4788-9800-20c1ac3b2e95-horizon-secret-key\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.736389 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4dbs\" (UniqueName: \"kubernetes.io/projected/dd3b9780-060c-4788-9800-20c1ac3b2e95-kube-api-access-h4dbs\") pod \"horizon-7fd67558f8-nzmxr\" (UID: \"dd3b9780-060c-4788-9800-20c1ac3b2e95\") " pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:53 crc kubenswrapper[4693]: I1122 09:17:53.849040 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:17:54 crc kubenswrapper[4693]: I1122 09:17:54.366652 4693 generic.go:334] "Generic (PLEG): container finished" podID="977876f3-91a6-4f81-9c62-b7d5fc9b1508" containerID="2d028d413186bba2df087c41e44cfb7465dc826d0fa34308f732a16b3953bb35" exitCode=0 Nov 22 09:17:54 crc kubenswrapper[4693]: I1122 09:17:54.366692 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ccc76" event={"ID":"977876f3-91a6-4f81-9c62-b7d5fc9b1508","Type":"ContainerDied","Data":"2d028d413186bba2df087c41e44cfb7465dc826d0fa34308f732a16b3953bb35"} Nov 22 09:17:55 crc kubenswrapper[4693]: I1122 09:17:55.578671 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-798745f775-l9pkd" Nov 22 09:17:55 crc kubenswrapper[4693]: I1122 09:17:55.622375 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:17:55 crc kubenswrapper[4693]: I1122 09:17:55.622599 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" containerID="cri-o://5b1cbc94dc76ba2d10ec91115f5c08136a658184b45f4c9b28d0d0ce348cefc6" gracePeriod=10 Nov 22 09:17:56 crc kubenswrapper[4693]: I1122 09:17:56.379457 4693 generic.go:334] "Generic (PLEG): container finished" podID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerID="5b1cbc94dc76ba2d10ec91115f5c08136a658184b45f4c9b28d0d0ce348cefc6" exitCode=0 Nov 22 09:17:56 crc kubenswrapper[4693]: I1122 09:17:56.379525 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" event={"ID":"84631f2f-64de-4a1a-a4ba-6d19d02ccd52","Type":"ContainerDied","Data":"5b1cbc94dc76ba2d10ec91115f5c08136a658184b45f4c9b28d0d0ce348cefc6"} Nov 22 09:17:58 crc kubenswrapper[4693]: I1122 09:17:58.553400 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: connect: connection refused" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.370881 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.374716 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.408299 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.410540 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.429626 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"fa77cb55-27dd-45f4-bf88-08e359c1b337","Type":"ContainerDied","Data":"0901b365571b723b58f1b907790aec57f7a11a58d7133336308d33592cc8532b"} Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.429677 4693 scope.go:117] "RemoveContainer" containerID="99dc93d52dfd369610ffb5620be48acd5549089fe21a2048bb5f6f1cd4801ed7" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.429799 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.452911 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-22fj8" event={"ID":"9b1d157d-be5c-47fc-9325-20373433ad4c","Type":"ContainerDied","Data":"99ef60a40659f344091a54dd905b5f73eb9c5ad465c57e5483bc39f54409d5eb"} Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.452943 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="99ef60a40659f344091a54dd905b5f73eb9c5ad465c57e5483bc39f54409d5eb" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.452990 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-22fj8" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.454871 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-ccc76" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.454881 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-ccc76" event={"ID":"977876f3-91a6-4f81-9c62-b7d5fc9b1508","Type":"ContainerDied","Data":"fa0e742ccc721bec65e8465ea5f17ed2872956e9ce35da5dedaf653a554fc05d"} Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.454908 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fa0e742ccc721bec65e8465ea5f17ed2872956e9ce35da5dedaf653a554fc05d" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.459137 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"f1248b42-dbbb-4f71-9691-4d37f5cee00f","Type":"ContainerDied","Data":"caab0acb9c763b7ab1ed95c37516f84558d680968f0c70a4d66d494bda1683a0"} Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.459215 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527721 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527764 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527791 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527808 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527866 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527882 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527912 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.527961 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle\") pod \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528021 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528063 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config\") pod \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\" (UID: 
\"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528089 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528111 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528176 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528200 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528229 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528244 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528288 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f6tgh\" (UniqueName: \"kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528324 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528396 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ppvln\" (UniqueName: \"kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln\") pod \"fa77cb55-27dd-45f4-bf88-08e359c1b337\" (UID: \"fa77cb55-27dd-45f4-bf88-08e359c1b337\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528424 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: 
I1122 09:17:59.528452 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sd9qh\" (UniqueName: \"kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh\") pod \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\" (UID: \"977876f3-91a6-4f81-9c62-b7d5fc9b1508\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528781 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.528896 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529144 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529182 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529202 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lg8wm\" (UniqueName: \"kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm\") pod \"9b1d157d-be5c-47fc-9325-20373433ad4c\" (UID: \"9b1d157d-be5c-47fc-9325-20373433ad4c\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529241 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs\") pod \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\" (UID: \"f1248b42-dbbb-4f71-9691-4d37f5cee00f\") " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529879 4693 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.529892 4693 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.530114 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs" (OuterVolumeSpecName: "logs") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.531019 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs" (OuterVolumeSpecName: "logs") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.533714 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts" (OuterVolumeSpecName: "scripts") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.537487 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts" (OuterVolumeSpecName: "scripts") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.537774 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.538237 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.538246 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln" (OuterVolumeSpecName: "kube-api-access-ppvln") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "kube-api-access-ppvln". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.538785 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm" (OuterVolumeSpecName: "kube-api-access-lg8wm") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "kube-api-access-lg8wm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.538976 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts" (OuterVolumeSpecName: "scripts") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.539248 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.545977 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.546790 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh" (OuterVolumeSpecName: "kube-api-access-sd9qh") pod "977876f3-91a6-4f81-9c62-b7d5fc9b1508" (UID: "977876f3-91a6-4f81-9c62-b7d5fc9b1508"). InnerVolumeSpecName "kube-api-access-sd9qh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.549788 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh" (OuterVolumeSpecName: "kube-api-access-f6tgh") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "kube-api-access-f6tgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.553300 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.553935 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config" (OuterVolumeSpecName: "config") pod "977876f3-91a6-4f81-9c62-b7d5fc9b1508" (UID: "977876f3-91a6-4f81-9c62-b7d5fc9b1508"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.559165 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data" (OuterVolumeSpecName: "config-data") pod "9b1d157d-be5c-47fc-9325-20373433ad4c" (UID: "9b1d157d-be5c-47fc-9325-20373433ad4c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.571117 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "977876f3-91a6-4f81-9c62-b7d5fc9b1508" (UID: "977876f3-91a6-4f81-9c62-b7d5fc9b1508"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.572007 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.575890 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.577139 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data" (OuterVolumeSpecName: "config-data") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.582517 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "f1248b42-dbbb-4f71-9691-4d37f5cee00f" (UID: "f1248b42-dbbb-4f71-9691-4d37f5cee00f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.583554 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data" (OuterVolumeSpecName: "config-data") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.590072 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "fa77cb55-27dd-45f4-bf88-08e359c1b337" (UID: "fa77cb55-27dd-45f4-bf88-08e359c1b337"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632371 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632401 4693 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632417 4693 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632427 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632436 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f6tgh\" (UniqueName: \"kubernetes.io/projected/f1248b42-dbbb-4f71-9691-4d37f5cee00f-kube-api-access-f6tgh\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632447 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632456 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ppvln\" (UniqueName: \"kubernetes.io/projected/fa77cb55-27dd-45f4-bf88-08e359c1b337-kube-api-access-ppvln\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632464 4693 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632473 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sd9qh\" (UniqueName: \"kubernetes.io/projected/977876f3-91a6-4f81-9c62-b7d5fc9b1508-kube-api-access-sd9qh\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632488 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632498 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632513 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lg8wm\" (UniqueName: \"kubernetes.io/projected/9b1d157d-be5c-47fc-9325-20373433ad4c-kube-api-access-lg8wm\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632522 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f1248b42-dbbb-4f71-9691-4d37f5cee00f-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 
09:17:59.632529 4693 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632538 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632546 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632554 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632561 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/fa77cb55-27dd-45f4-bf88-08e359c1b337-logs\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632568 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9b1d157d-be5c-47fc-9325-20373433ad4c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632576 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632584 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa77cb55-27dd-45f4-bf88-08e359c1b337-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632591 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/977876f3-91a6-4f81-9c62-b7d5fc9b1508-config\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.632599 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f1248b42-dbbb-4f71-9691-4d37f5cee00f-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.645519 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.646538 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.733617 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\""
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.733645 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\""
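The entries above record the complete kubelet teardown lifecycle for the two Glance local PVs, local-storage07-crc and local-storage03-crc: UnmountVolume started, then UnmountVolume.TearDown succeeded, then UnmountDevice succeeded, then Volume detached. A minimal Go sketch for pulling that lifecycle out of a capture like this one; it assumes the log has been saved locally as kubelet.log (a hypothetical path) and matches only the message shapes visible in the entries above:

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Hypothetical local copy of this capture; adjust the path as needed.
	f, err := os.Open("kubelet.log")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// The four phases as they appear in the reconciler_common.go and
	// operation_generator.go entries above.
	phase := regexp.MustCompile(`UnmountVolume started|UnmountVolume\.TearDown succeeded|UnmountDevice succeeded|Volume detached`)
	// The two local PVs being torn down in this section.
	volume := regexp.MustCompile(`local-storage0[37]-crc`)

	sc := bufio.NewScanner(f)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // some entries run to several KB
	for sc.Scan() {
		if line := sc.Text(); phase.MatchString(line) && volume.MatchString(line) {
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		panic(err)
	}
}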
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.757811 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.767976 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773158 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773758 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773774 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773785 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-log"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773791 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-log"
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773799 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-log"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773805 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-log"
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773816 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="977876f3-91a6-4f81-9c62-b7d5fc9b1508" containerName="neutron-db-sync"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773821 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="977876f3-91a6-4f81-9c62-b7d5fc9b1508" containerName="neutron-db-sync"
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773835 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773854 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: E1122 09:17:59.773871 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b1d157d-be5c-47fc-9325-20373433ad4c" containerName="keystone-bootstrap"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.773877 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b1d157d-be5c-47fc-9325-20373433ad4c" containerName="keystone-bootstrap"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774024 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-log"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774035 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774044 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" containerName="glance-httpd"
Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774051 4693 memory_manager.go:354] "RemoveStaleState removing state"
podUID="977876f3-91a6-4f81-9c62-b7d5fc9b1508" containerName="neutron-db-sync" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774060 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" containerName="glance-log" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.774070 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b1d157d-be5c-47fc-9325-20373433ad4c" containerName="keystone-bootstrap" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.775059 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.776519 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.777003 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.777017 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.777213 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cwf55" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.813889 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.825245 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.829560 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.849882 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.853701 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.855813 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.855949 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.872996 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.938992 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939043 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939068 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939142 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bdml\" (UniqueName: \"kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939278 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939303 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939327 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:17:59 crc kubenswrapper[4693]: I1122 09:17:59.939386 4693 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.041955 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042002 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq46t\" (UniqueName: \"kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042076 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042103 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042124 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042143 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042169 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bdml\" (UniqueName: \"kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042187 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042312 4693 operation_generator.go:580] 
"MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042364 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042401 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042424 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042452 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042469 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042493 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042517 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.042541 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.043099 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.043129 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.047546 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.047926 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.052328 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.052430 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.057556 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bdml\" (UniqueName: \"kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.062117 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.110616 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144333 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq46t\" (UniqueName: \"kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144480 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144543 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144569 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144617 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144672 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144705 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.144776 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.145073 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 
09:18:00.146416 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.146535 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.147926 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.148400 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.148854 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.149600 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.158242 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f1248b42-dbbb-4f71-9691-4d37f5cee00f" path="/var/lib/kubelet/pods/f1248b42-dbbb-4f71-9691-4d37f5cee00f/volumes" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.159034 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq46t\" (UniqueName: \"kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.159358 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa77cb55-27dd-45f4-bf88-08e359c1b337" path="/var/lib/kubelet/pods/fa77cb55-27dd-45f4-bf88-08e359c1b337/volumes" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.164909 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " pod="openstack/glance-default-external-api-0" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 
09:18:00.249462 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.249524 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.469189 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.654512 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-22fj8"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.686050 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-22fj8"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.714979 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-k75fh"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.716398 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-k75fh"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.720645 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k575m"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.720909 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.721146 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.721280 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.721450 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.738754 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-k75fh"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.748343 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.749805 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.752738 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"]
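The two probe entries at the top of this block show the kubelet's HTTP liveness probe for machine-config-daemon-scx6r failing with connection refused on 127.0.0.1:8798. A minimal standalone sketch of an equivalent check in Go, assuming the same endpoint; the one-second timeout is an assumption, not a value taken from this log:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Same endpoint as the failed probe logged above; assumed timeout.
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get("http://127.0.0.1:8798/health")
	if err != nil {
		// This is the branch the log shows: dial tcp ... connect: connection refused.
		fmt.Println("probe failed:", err)
		return
	}
	defer resp.Body.Close()
	// Kubelet HTTP probes treat any status in [200, 400) as success.
	fmt.Println("probe status:", resp.Status)
}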
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.753920 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-5f8cbc66c6-d5d77"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.755590 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.756190 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-cjrhh"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.756336 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.756442 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.760201 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.764699 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"]
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871520 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftdmd\" (UniqueName: \"kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871566 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871590 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871613 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871637 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871651 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77"
Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871664 4693 reconciler_common.go:245]
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871696 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d62b\" (UniqueName: \"kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871713 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871738 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871754 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871774 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871813 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871834 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tz42n\" (UniqueName: \"kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871865 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc 
kubenswrapper[4693]: I1122 09:18:00.871886 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.871900 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973828 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973890 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973917 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973935 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973971 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d62b\" (UniqueName: \"kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.973988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974027 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974045 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974064 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974108 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974128 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tz42n\" (UniqueName: \"kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974147 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974167 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974182 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974216 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftdmd\" (UniqueName: \"kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974235 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974250 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.974984 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.975250 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.976062 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.978227 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.978383 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.979328 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.979337 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.980363 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.987455 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 
09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.987542 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.987684 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.988350 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.988747 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.990573 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tz42n\" (UniqueName: \"kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.990589 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config\") pod \"neutron-5f8cbc66c6-d5d77\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.991058 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d62b\" (UniqueName: \"kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b\") pod \"dnsmasq-dns-5b95cfcf9c-8rmgm\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:00 crc kubenswrapper[4693]: I1122 09:18:00.993380 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftdmd\" (UniqueName: \"kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd\") pod \"keystone-bootstrap-k75fh\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:01 crc kubenswrapper[4693]: I1122 09:18:01.065549 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:01 crc kubenswrapper[4693]: I1122 09:18:01.076967 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:01 crc kubenswrapper[4693]: I1122 09:18:01.086450 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.153937 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b1d157d-be5c-47fc-9325-20373433ad4c" path="/var/lib/kubelet/pods/9b1d157d-be5c-47fc-9325-20373433ad4c/volumes" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.771490 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7fbc84ccfc-8tdp6"] Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.773069 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.776202 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.776371 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.786379 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7fbc84ccfc-8tdp6"] Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915728 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-combined-ca-bundle\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915792 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcgpx\" (UniqueName: \"kubernetes.io/projected/bd35a927-864c-4718-891f-1a036a99ddfb-kube-api-access-mcgpx\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915869 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-httpd-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915905 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-ovndb-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915935 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.915966 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-internal-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 
09:18:02 crc kubenswrapper[4693]: I1122 09:18:02.916121 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-public-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018366 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-public-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018447 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-combined-ca-bundle\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018480 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcgpx\" (UniqueName: \"kubernetes.io/projected/bd35a927-864c-4718-891f-1a036a99ddfb-kube-api-access-mcgpx\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018515 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-httpd-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018539 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-ovndb-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018561 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.018600 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-internal-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.023563 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-public-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.024115 4693 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-combined-ca-bundle\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.024398 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-httpd-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.026757 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-config\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.027017 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-internal-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.027440 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd35a927-864c-4718-891f-1a036a99ddfb-ovndb-tls-certs\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.039525 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcgpx\" (UniqueName: \"kubernetes.io/projected/bd35a927-864c-4718-891f-1a036a99ddfb-kube-api-access-mcgpx\") pod \"neutron-7fbc84ccfc-8tdp6\" (UID: \"bd35a927-864c-4718-891f-1a036a99ddfb\") " pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:03 crc kubenswrapper[4693]: I1122 09:18:03.097365 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:06 crc kubenswrapper[4693]: E1122 09:18:06.923434 4693 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879" Nov 22 09:18:06 crc kubenswrapper[4693]: E1122 09:18:06.923829 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qg4fm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-qjqt5_openstack(be72ccb6-821d-4b5a-a4d0-6d866fc617f6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 09:18:06 crc kubenswrapper[4693]: E1122 09:18:06.925039 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-qjqt5" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" Nov 22 09:18:07 crc kubenswrapper[4693]: E1122 09:18:07.149107 
4693 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140" Nov 22 09:18:07 crc kubenswrapper[4693]: E1122 09:18:07.149222 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central@sha256:d375d370be5ead0dac71109af644849e5795f535f9ad8eeacea261d77ae6f140,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n59h566h667h64fh6h65fh6h88h664h84h554h579h684h695hf5h687h5b4hd8hch59dh65ch5cbhffh68fhb9h648h5bch589hf7h97hb8hbcq,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qqfwf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(079d82bf-3312-4f7f-8ef8-6d55b20c8f5d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.181018 4693 scope.go:117] "RemoveContainer" containerID="394900ad6f0c2f7749676037a3b4c4b6c6f1cb82b906aa76fb557a3bea1a427b" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.337608 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.367371 4693 scope.go:117] "RemoveContainer" containerID="3ca8623712c74f77d13feb3325edf6951dd01ae9211ae9737235040c515ef80d" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.486678 4693 scope.go:117] "RemoveContainer" containerID="a02f73e78109482178f60db50f8118496e581113f949dccc3005c68c3c58a805" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.498943 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.499067 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.499107 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.499139 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.499247 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.499270 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cmv74\" (UniqueName: \"kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74\") pod \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\" (UID: \"84631f2f-64de-4a1a-a4ba-6d19d02ccd52\") " Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.506382 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74" (OuterVolumeSpecName: "kube-api-access-cmv74") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "kube-api-access-cmv74". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.548394 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" event={"ID":"84631f2f-64de-4a1a-a4ba-6d19d02ccd52","Type":"ContainerDied","Data":"a9b8ac7acea28d75b3faae56ebdd633fab127d33f67262a464376f306661c07f"} Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.548421 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.548495 4693 scope.go:117] "RemoveContainer" containerID="5b1cbc94dc76ba2d10ec91115f5c08136a658184b45f4c9b28d0d0ce348cefc6" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.551950 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hvt8s" event={"ID":"c37ecde5-29e1-4377-95be-2b7da9e65110","Type":"ContainerStarted","Data":"f4b4a89a35e7854bde49334ddf1b4372005107a04cd01768c7ab9a094aa10292"} Nov 22 09:18:07 crc kubenswrapper[4693]: E1122 09:18:07.554336 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:37d64e0a00c54e71a4c1fcbbbf7e832f6886ffd03c9a02b6ee3ca48fabc30879\\\"\"" pod="openstack/cinder-db-sync-qjqt5" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.554918 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-fhrhl" podStartSLOduration=2.521158595 podStartE2EDuration="23.554899082s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="2025-11-22 09:17:46.178117446 +0000 UTC m=+862.320619737" lastFinishedPulling="2025-11-22 09:18:07.211857944 +0000 UTC m=+883.354360224" observedRunningTime="2025-11-22 09:18:07.553630706 +0000 UTC m=+883.696132998" watchObservedRunningTime="2025-11-22 09:18:07.554899082 +0000 UTC m=+883.697401373" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.575887 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-hvt8s" podStartSLOduration=2.301737145 podStartE2EDuration="23.575870344s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="2025-11-22 09:17:45.906928514 +0000 UTC m=+862.049430805" lastFinishedPulling="2025-11-22 09:18:07.181061713 +0000 UTC m=+883.323564004" observedRunningTime="2025-11-22 09:18:07.568671204 +0000 UTC m=+883.711173495" watchObservedRunningTime="2025-11-22 09:18:07.575870344 +0000 UTC m=+883.718372635" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.583951 4693 scope.go:117] "RemoveContainer" containerID="56510dbabbe3890b7ca9b3a324ec59009dc4ce9a9273645be92965e712e22d84" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.601951 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cmv74\" (UniqueName: \"kubernetes.io/projected/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-kube-api-access-cmv74\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.681559 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.685153 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config" (OuterVolumeSpecName: "config") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.688618 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.691219 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.691570 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "84631f2f-64de-4a1a-a4ba-6d19d02ccd52" (UID: "84631f2f-64de-4a1a-a4ba-6d19d02ccd52"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.703951 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.707744 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.708128 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.708157 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.708171 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/84631f2f-64de-4a1a-a4ba-6d19d02ccd52-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.722511 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-7fd67558f8-nzmxr"] Nov 22 09:18:07 crc kubenswrapper[4693]: W1122 09:18:07.728435 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5651530_e21c_44f7_9e9c_3187b4fcd3c6.slice/crio-36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488 WatchSource:0}: Error finding container 36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488: Status 404 returned error can't find the container with id 36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488 Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.730233 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/keystone-bootstrap-k75fh"] Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.837397 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"] Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.853092 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"] Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.898010 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.910523 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-cb6b7c77c-l62xw"] Nov 22 09:18:07 crc kubenswrapper[4693]: I1122 09:18:07.916266 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7fbc84ccfc-8tdp6"] Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.003218 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:18:08 crc kubenswrapper[4693]: W1122 09:18:08.023543 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod648ffab2_f448_4bb1_a53f_1178c8dd8c32.slice/crio-b4a6bef1dfa5225bb1371161dcdb1e46af9c79f3deae47547bad2afc7c9fa395 WatchSource:0}: Error finding container b4a6bef1dfa5225bb1371161dcdb1e46af9c79f3deae47547bad2afc7c9fa395: Status 404 returned error can't find the container with id b4a6bef1dfa5225bb1371161dcdb1e46af9c79f3deae47547bad2afc7c9fa395 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.116291 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.163616 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" path="/var/lib/kubelet/pods/84631f2f-64de-4a1a-a4ba-6d19d02ccd52/volumes" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.552924 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-cb6b7c77c-l62xw" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.125:5353: i/o timeout" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.575043 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fbc84ccfc-8tdp6" event={"ID":"bd35a927-864c-4718-891f-1a036a99ddfb","Type":"ContainerStarted","Data":"48558b6c0b25398fd9c56f9b899e485546aada68b2cc2359f980155d1aa5d328"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.575096 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fbc84ccfc-8tdp6" event={"ID":"bd35a927-864c-4718-891f-1a036a99ddfb","Type":"ContainerStarted","Data":"9bd5325ba9e9ce882a1bf1399dc60a8ed61ce3f8aaffd62a0fbc8eb907132843"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.575116 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7fbc84ccfc-8tdp6" event={"ID":"bd35a927-864c-4718-891f-1a036a99ddfb","Type":"ContainerStarted","Data":"91a122872143959bee07af5c9c7869f1c746d9a677a3e118b32eb61fdca6f042"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.575153 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.577221 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fhrhl" 
event={"ID":"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9","Type":"ContainerStarted","Data":"8c1ad42526a5bb5229fb0809b8cef54e966ce4364be9da95831d425eddee6a26"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.586546 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k75fh" event={"ID":"b5651530-e21c-44f7-9e9c-3187b4fcd3c6","Type":"ContainerStarted","Data":"6d46ebf25a2e46264e2d6cd4dd05ef59d57cebd5c50e003f481b4af24693e4e4"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.586580 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k75fh" event={"ID":"b5651530-e21c-44f7-9e9c-3187b4fcd3c6","Type":"ContainerStarted","Data":"36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.595228 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7fbc84ccfc-8tdp6" podStartSLOduration=6.595201773 podStartE2EDuration="6.595201773s" podCreationTimestamp="2025-11-22 09:18:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:08.591194186 +0000 UTC m=+884.733696477" watchObservedRunningTime="2025-11-22 09:18:08.595201773 +0000 UTC m=+884.737704064" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.600012 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerStarted","Data":"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.600046 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerStarted","Data":"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.600077 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerStarted","Data":"fdab056b703451216023b15dadb963adc18f55552853ce193b79a980fb99e1f8"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.602352 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerStarted","Data":"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.602391 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerStarted","Data":"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.602519 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6bb9684445-rmchz" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon-log" containerID="cri-o://bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.602779 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6bb9684445-rmchz" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon" 
containerID="cri-o://03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.607374 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerStarted","Data":"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.607423 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerStarted","Data":"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.607531 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c9ccb65bf-fw9qk" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon-log" containerID="cri-o://f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.607621 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-c9ccb65bf-fw9qk" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon" containerID="cri-o://25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.608692 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-k75fh" podStartSLOduration=8.608660436 podStartE2EDuration="8.608660436s" podCreationTimestamp="2025-11-22 09:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:08.607037934 +0000 UTC m=+884.749540225" watchObservedRunningTime="2025-11-22 09:18:08.608660436 +0000 UTC m=+884.751162727" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.613424 4693 generic.go:334] "Generic (PLEG): container finished" podID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerID="565fbe1e14973696b65be3a617707d9fe3feb9b2f3acfae6ba85376e7deff02b" exitCode=0 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.613475 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" event={"ID":"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a","Type":"ContainerDied","Data":"565fbe1e14973696b65be3a617707d9fe3feb9b2f3acfae6ba85376e7deff02b"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.613497 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" event={"ID":"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a","Type":"ContainerStarted","Data":"c2a1cecb78f1516e85844623a3ee2d2649df62fde62c23d39436ccc8dc69c1be"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.621147 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerStarted","Data":"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.621182 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerStarted","Data":"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 
09:18:08.621297 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c46c9cb9c-wcdw2" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon-log" containerID="cri-o://19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.621388 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-6c46c9cb9c-wcdw2" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon" containerID="cri-o://910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" gracePeriod=30 Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.628107 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-6bb9684445-rmchz" podStartSLOduration=2.751093576 podStartE2EDuration="24.62809094s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="2025-11-22 09:17:45.353354784 +0000 UTC m=+861.495857074" lastFinishedPulling="2025-11-22 09:18:07.230352146 +0000 UTC m=+883.372854438" observedRunningTime="2025-11-22 09:18:08.624593181 +0000 UTC m=+884.767095472" watchObservedRunningTime="2025-11-22 09:18:08.62809094 +0000 UTC m=+884.770593231" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.676992 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd67558f8-nzmxr" event={"ID":"dd3b9780-060c-4788-9800-20c1ac3b2e95","Type":"ContainerStarted","Data":"8e296d31702a8e5f983deb4637f13503470befe797e064740c493537c9209067"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.677039 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd67558f8-nzmxr" event={"ID":"dd3b9780-060c-4788-9800-20c1ac3b2e95","Type":"ContainerStarted","Data":"c44e4763bc5c509038074c7b75e88e8abfddb266804b3522a99b74426ac8715c"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.677052 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-7fd67558f8-nzmxr" event={"ID":"dd3b9780-060c-4788-9800-20c1ac3b2e95","Type":"ContainerStarted","Data":"22696a35a378587778fc5df4cfae227d7ec9401ce88b3058cb6ba3f1f00cd482"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.681055 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerStarted","Data":"b4a6bef1dfa5225bb1371161dcdb1e46af9c79f3deae47547bad2afc7c9fa395"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.686289 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-67dc677d8b-gqzpn" podStartSLOduration=15.686276483 podStartE2EDuration="15.686276483s" podCreationTimestamp="2025-11-22 09:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:08.669446871 +0000 UTC m=+884.811949161" watchObservedRunningTime="2025-11-22 09:18:08.686276483 +0000 UTC m=+884.828778773" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.695977 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerStarted","Data":"ad3eca5eb74db8e804c7fa8e4445cc36b609e8c81e54386fa69e7dac43169b73"} Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.725983 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/horizon-6c46c9cb9c-wcdw2" podStartSLOduration=3.330690771 podStartE2EDuration="24.72596127s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="2025-11-22 09:17:45.908389521 +0000 UTC m=+862.050891813" lastFinishedPulling="2025-11-22 09:18:07.30366002 +0000 UTC m=+883.446162312" observedRunningTime="2025-11-22 09:18:08.708436781 +0000 UTC m=+884.850939072" watchObservedRunningTime="2025-11-22 09:18:08.72596127 +0000 UTC m=+884.868463561" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.754827 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-c9ccb65bf-fw9qk" podStartSLOduration=2.966131897 podStartE2EDuration="22.754809157s" podCreationTimestamp="2025-11-22 09:17:46 +0000 UTC" firstStartedPulling="2025-11-22 09:17:47.452335536 +0000 UTC m=+863.594837827" lastFinishedPulling="2025-11-22 09:18:07.241012796 +0000 UTC m=+883.383515087" observedRunningTime="2025-11-22 09:18:08.726553645 +0000 UTC m=+884.869055936" watchObservedRunningTime="2025-11-22 09:18:08.754809157 +0000 UTC m=+884.897311448" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.789009 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-7fd67558f8-nzmxr" podStartSLOduration=15.78898868 podStartE2EDuration="15.78898868s" podCreationTimestamp="2025-11-22 09:17:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:08.751367243 +0000 UTC m=+884.893869544" watchObservedRunningTime="2025-11-22 09:18:08.78898868 +0000 UTC m=+884.931490972" Nov 22 09:18:08 crc kubenswrapper[4693]: I1122 09:18:08.922980 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"] Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.702532 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerStarted","Data":"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.703039 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerStarted","Data":"90f87db59d959deff7738a8fda51d85a99c03bb0113efc8c7f07706af26ca6f0"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.704288 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerStarted","Data":"94e8580de619ea53071d021e883694ef8a97f2451521f94e5370f31688423352"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.704317 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerStarted","Data":"dc8bee7fa151c8e85e188e167b44e73d8073a1fc91f60e2f436bf44e61f7ba2a"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.707678 4693 generic.go:334] "Generic (PLEG): container finished" podID="dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" containerID="8c1ad42526a5bb5229fb0809b8cef54e966ce4364be9da95831d425eddee6a26" exitCode=0 Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.707737 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fhrhl" 
event={"ID":"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9","Type":"ContainerDied","Data":"8c1ad42526a5bb5229fb0809b8cef54e966ce4364be9da95831d425eddee6a26"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.709274 4693 generic.go:334] "Generic (PLEG): container finished" podID="c37ecde5-29e1-4377-95be-2b7da9e65110" containerID="f4b4a89a35e7854bde49334ddf1b4372005107a04cd01768c7ab9a094aa10292" exitCode=0 Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.709323 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hvt8s" event={"ID":"c37ecde5-29e1-4377-95be-2b7da9e65110","Type":"ContainerDied","Data":"f4b4a89a35e7854bde49334ddf1b4372005107a04cd01768c7ab9a094aa10292"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.710862 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" event={"ID":"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a","Type":"ContainerStarted","Data":"6921216139c3b6736127f0e0de6b2900513ee8a69e5a9ba23748598040107c2f"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.711014 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.713156 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerStarted","Data":"aba38c12bf23ffadbc2f82662a7635f10a4c8679fec81c7deb2a47eb66442cbc"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.713271 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerStarted","Data":"cc046355824e69ccf74334f7538525fac3291381fab0182ebe08371f6df761e4"} Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.722613 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=10.722603536 podStartE2EDuration="10.722603536s" podCreationTimestamp="2025-11-22 09:17:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:09.718305403 +0000 UTC m=+885.860807694" watchObservedRunningTime="2025-11-22 09:18:09.722603536 +0000 UTC m=+885.865105828" Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.755316 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=10.755300673 podStartE2EDuration="10.755300673s" podCreationTimestamp="2025-11-22 09:17:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:09.749733562 +0000 UTC m=+885.892235853" watchObservedRunningTime="2025-11-22 09:18:09.755300673 +0000 UTC m=+885.897802964" Nov 22 09:18:09 crc kubenswrapper[4693]: I1122 09:18:09.802005 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" podStartSLOduration=9.801991005 podStartE2EDuration="9.801991005s" podCreationTimestamp="2025-11-22 09:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:09.796543871 +0000 UTC m=+885.939046161" watchObservedRunningTime="2025-11-22 09:18:09.801991005 +0000 UTC m=+885.944493296" Nov 22 
09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.111109 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.111428 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.135205 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.155911 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.469781 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.469815 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.493363 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.520952 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.728089 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerStarted","Data":"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd"} Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.731070 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerStarted","Data":"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad"} Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.731567 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.731615 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.731627 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.731636 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.732433 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Nov 22 09:18:10 crc kubenswrapper[4693]: I1122 09:18:10.757579 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-5f8cbc66c6-d5d77" podStartSLOduration=10.757562768 podStartE2EDuration="10.757562768s" podCreationTimestamp="2025-11-22 09:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:10.75319306 +0000 UTC m=+886.895695350" watchObservedRunningTime="2025-11-22 09:18:10.757562768 +0000 UTC m=+886.900065060" Nov 22 09:18:11 crc 
kubenswrapper[4693]: I1122 09:18:11.065456 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-fhrhl" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.205961 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle\") pod \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.206048 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs\") pod \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.206074 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gc8rm\" (UniqueName: \"kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm\") pod \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.206160 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts\") pod \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.206201 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data\") pod \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\" (UID: \"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.208468 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs" (OuterVolumeSpecName: "logs") pod "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" (UID: "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.223738 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm" (OuterVolumeSpecName: "kube-api-access-gc8rm") pod "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" (UID: "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9"). InnerVolumeSpecName "kube-api-access-gc8rm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.228746 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts" (OuterVolumeSpecName: "scripts") pod "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" (UID: "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.251507 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data" (OuterVolumeSpecName: "config-data") pod "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" (UID: "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.308432 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.308463 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gc8rm\" (UniqueName: \"kubernetes.io/projected/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-kube-api-access-gc8rm\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.308475 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.308483 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.320110 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.320367 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" (UID: "dacf4def-2cb8-4e5b-8761-e3a45a4a89d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.410244 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data\") pod \"c37ecde5-29e1-4377-95be-2b7da9e65110\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.410347 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psdtp\" (UniqueName: \"kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp\") pod \"c37ecde5-29e1-4377-95be-2b7da9e65110\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.410385 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle\") pod \"c37ecde5-29e1-4377-95be-2b7da9e65110\" (UID: \"c37ecde5-29e1-4377-95be-2b7da9e65110\") " Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.411295 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.418032 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp" (OuterVolumeSpecName: "kube-api-access-psdtp") pod "c37ecde5-29e1-4377-95be-2b7da9e65110" (UID: "c37ecde5-29e1-4377-95be-2b7da9e65110"). InnerVolumeSpecName "kube-api-access-psdtp". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.424975 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "c37ecde5-29e1-4377-95be-2b7da9e65110" (UID: "c37ecde5-29e1-4377-95be-2b7da9e65110"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.436665 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c37ecde5-29e1-4377-95be-2b7da9e65110" (UID: "c37ecde5-29e1-4377-95be-2b7da9e65110"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.512725 4693 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.512997 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psdtp\" (UniqueName: \"kubernetes.io/projected/c37ecde5-29e1-4377-95be-2b7da9e65110-kube-api-access-psdtp\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.513008 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c37ecde5-29e1-4377-95be-2b7da9e65110-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.746511 4693 generic.go:334] "Generic (PLEG): container finished" podID="b5651530-e21c-44f7-9e9c-3187b4fcd3c6" containerID="6d46ebf25a2e46264e2d6cd4dd05ef59d57cebd5c50e003f481b4af24693e4e4" exitCode=0 Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.746710 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k75fh" event={"ID":"b5651530-e21c-44f7-9e9c-3187b4fcd3c6","Type":"ContainerDied","Data":"6d46ebf25a2e46264e2d6cd4dd05ef59d57cebd5c50e003f481b4af24693e4e4"} Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.750506 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-fhrhl" event={"ID":"dacf4def-2cb8-4e5b-8761-e3a45a4a89d9","Type":"ContainerDied","Data":"6ad6919e0ae24ac05c67aba1df8fcf3b3cb469d571c4cf8d228565f320003e60"} Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.750568 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6ad6919e0ae24ac05c67aba1df8fcf3b3cb469d571c4cf8d228565f320003e60" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.750646 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-fhrhl" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.753882 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-hvt8s" event={"ID":"c37ecde5-29e1-4377-95be-2b7da9e65110","Type":"ContainerDied","Data":"a99ecfb27d1d44b34ab97809d490af7f5072bb14e9f1903df2643685e9defc0a"} Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.753922 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a99ecfb27d1d44b34ab97809d490af7f5072bb14e9f1903df2643685e9defc0a" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.753960 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-hvt8s" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.985443 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-9f6899d66-t5sml"] Nov 22 09:18:11 crc kubenswrapper[4693]: E1122 09:18:11.985866 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" containerName="placement-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.985886 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" containerName="placement-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: E1122 09:18:11.985903 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.985910 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" Nov 22 09:18:11 crc kubenswrapper[4693]: E1122 09:18:11.985933 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c37ecde5-29e1-4377-95be-2b7da9e65110" containerName="barbican-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.985940 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c37ecde5-29e1-4377-95be-2b7da9e65110" containerName="barbican-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: E1122 09:18:11.985954 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="init" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.985959 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="init" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.986113 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c37ecde5-29e1-4377-95be-2b7da9e65110" containerName="barbican-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.986133 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="84631f2f-64de-4a1a-a4ba-6d19d02ccd52" containerName="dnsmasq-dns" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.986145 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" containerName="placement-db-sync" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.987164 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.992779 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.992869 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.992959 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Nov 22 09:18:11 crc kubenswrapper[4693]: I1122 09:18:11.995241 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-mjz8r" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.008995 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.011076 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9f6899d66-t5sml"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.087257 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-6f9b77d547-xf85h"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.095259 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-64777f45c8-bwpgz"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.095481 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.097690 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.102029 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-bhk4q" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.102241 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.102455 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.102777 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f9b77d547-xf85h"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.103022 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138035 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-89mck\" (UniqueName: \"kubernetes.io/projected/5b49963a-32e1-4500-969a-b7feaa78d4d3-kube-api-access-89mck\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138071 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-scripts\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138096 4693 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-internal-tls-certs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138114 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-combined-ca-bundle\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138137 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-public-tls-certs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138156 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-config-data\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138180 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b49963a-32e1-4500-969a-b7feaa78d4d3-logs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.138377 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64777f45c8-bwpgz"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.200267 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.200513 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="dnsmasq-dns" containerID="cri-o://6921216139c3b6736127f0e0de6b2900513ee8a69e5a9ba23748598040107c2f" gracePeriod=10 Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.239340 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-logs\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.247311 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249082 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249285 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94csb\" (UniqueName: \"kubernetes.io/projected/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-kube-api-access-94csb\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249338 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-combined-ca-bundle\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249368 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249440 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-89mck\" (UniqueName: \"kubernetes.io/projected/5b49963a-32e1-4500-969a-b7feaa78d4d3-kube-api-access-89mck\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249476 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-scripts\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249499 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-combined-ca-bundle\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249515 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-internal-tls-certs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249537 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249564 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-public-tls-certs\") 
pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249586 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data-custom\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249604 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-config-data\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249646 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b49963a-32e1-4500-969a-b7feaa78d4d3-logs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249665 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-combined-ca-bundle\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249691 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ea61845-fe45-4eed-a854-92545e309870-logs\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249853 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-55zbj\" (UniqueName: \"kubernetes.io/projected/2ea61845-fe45-4eed-a854-92545e309870-kube-api-access-55zbj\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.249879 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data-custom\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.253480 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5b49963a-32e1-4500-969a-b7feaa78d4d3-logs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.259948 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-config-data\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.260996 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.263329 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-scripts\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.266624 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-public-tls-certs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.269066 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-internal-tls-certs\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.269289 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5b49963a-32e1-4500-969a-b7feaa78d4d3-combined-ca-bundle\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.284057 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.285674 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.287300 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-89mck\" (UniqueName: \"kubernetes.io/projected/5b49963a-32e1-4500-969a-b7feaa78d4d3-kube-api-access-89mck\") pod \"placement-9f6899d66-t5sml\" (UID: \"5b49963a-32e1-4500-969a-b7feaa78d4d3\") " pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.288142 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.297590 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.309308 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380156 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-combined-ca-bundle\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380482 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380513 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ea61845-fe45-4eed-a854-92545e309870-logs\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380549 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380578 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380666 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380705 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-55zbj\" (UniqueName: \"kubernetes.io/projected/2ea61845-fe45-4eed-a854-92545e309870-kube-api-access-55zbj\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380750 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data-custom\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380777 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jwc5l\" (UniqueName: 
\"kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380819 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380875 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380921 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9cdjn\" (UniqueName: \"kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.380948 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-logs\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381005 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94csb\" (UniqueName: \"kubernetes.io/projected/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-kube-api-access-94csb\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381039 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-combined-ca-bundle\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381068 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381098 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381160 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381196 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data-custom\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381229 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381256 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.381676 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2ea61845-fe45-4eed-a854-92545e309870-logs\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.382191 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-logs\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.384479 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-combined-ca-bundle\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.397736 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-55zbj\" (UniqueName: \"kubernetes.io/projected/2ea61845-fe45-4eed-a854-92545e309870-kube-api-access-55zbj\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.399480 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc 
kubenswrapper[4693]: I1122 09:18:12.401813 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94csb\" (UniqueName: \"kubernetes.io/projected/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-kube-api-access-94csb\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.408322 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data-custom\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.413384 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-config-data-custom\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.413629 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40-combined-ca-bundle\") pod \"barbican-worker-6f9b77d547-xf85h\" (UID: \"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40\") " pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.426296 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ea61845-fe45-4eed-a854-92545e309870-config-data\") pod \"barbican-keystone-listener-64777f45c8-bwpgz\" (UID: \"2ea61845-fe45-4eed-a854-92545e309870\") " pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.430773 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-6f9b77d547-xf85h" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.450968 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482725 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482771 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482810 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482831 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482869 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482911 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482936 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jwc5l\" (UniqueName: \"kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482960 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.482983 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " 
pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.483008 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9cdjn\" (UniqueName: \"kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.483049 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.483917 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.484170 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.485496 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.486014 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.486523 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.487646 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.490763 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.490942 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.494175 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.502290 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jwc5l\" (UniqueName: \"kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l\") pod \"barbican-api-854d765ddd-vqvhg\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.517449 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9cdjn\" (UniqueName: \"kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn\") pod \"dnsmasq-dns-66b66f7449-d4tj4\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.766913 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.774533 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.795006 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.796493 4693 generic.go:334] "Generic (PLEG): container finished" podID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerID="6921216139c3b6736127f0e0de6b2900513ee8a69e5a9ba23748598040107c2f" exitCode=0 Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.796687 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" event={"ID":"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a","Type":"ContainerDied","Data":"6921216139c3b6736127f0e0de6b2900513ee8a69e5a9ba23748598040107c2f"} Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.796737 4693 scope.go:117] "RemoveContainer" containerID="6921216139c3b6736127f0e0de6b2900513ee8a69e5a9ba23748598040107c2f" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.888449 4693 scope.go:117] "RemoveContainer" containerID="565fbe1e14973696b65be3a617707d9fe3feb9b2f3acfae6ba85376e7deff02b" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892425 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892517 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d62b\" (UniqueName: \"kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892549 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892568 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892634 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.892689 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb\") pod \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\" (UID: \"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a\") " Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.912256 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b" (OuterVolumeSpecName: "kube-api-access-4d62b") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "kube-api-access-4d62b". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.956030 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config" (OuterVolumeSpecName: "config") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.958974 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.983251 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:12 crc kubenswrapper[4693]: I1122 09:18:12.984383 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-9f6899d66-t5sml"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.002124 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.002355 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d62b\" (UniqueName: \"kubernetes.io/projected/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-kube-api-access-4d62b\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.002365 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.002374 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.015133 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.035572 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" (UID: "c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.105785 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.105819 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.324901 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-64777f45c8-bwpgz"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.336645 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-6f9b77d547-xf85h"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.374560 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.434050 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.438832 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.513596 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.514446 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.514512 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.514556 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.514675 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.514729 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftdmd\" (UniqueName: \"kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd\") pod \"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\" (UID: 
\"b5651530-e21c-44f7-9e9c-3187b4fcd3c6\") " Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.525349 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts" (OuterVolumeSpecName: "scripts") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.527019 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd" (OuterVolumeSpecName: "kube-api-access-ftdmd") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "kube-api-access-ftdmd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.527151 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.530941 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.545199 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.554622 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data" (OuterVolumeSpecName: "config-data") pod "b5651530-e21c-44f7-9e9c-3187b4fcd3c6" (UID: "b5651530-e21c-44f7-9e9c-3187b4fcd3c6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620421 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftdmd\" (UniqueName: \"kubernetes.io/projected/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-kube-api-access-ftdmd\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620769 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620784 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620793 4693 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-fernet-keys\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620802 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.620810 4693 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/b5651530-e21c-44f7-9e9c-3187b4fcd3c6-credential-keys\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.694297 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.694402 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.806118 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f6899d66-t5sml" event={"ID":"5b49963a-32e1-4500-969a-b7feaa78d4d3","Type":"ContainerStarted","Data":"988af032a5a523902b9bd3c20ee38dfdbfaf789797ade4179df63ba65b6d4ab8"} Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.806157 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f6899d66-t5sml" event={"ID":"5b49963a-32e1-4500-969a-b7feaa78d4d3","Type":"ContainerStarted","Data":"b64256150c9e42c1345a9d0ca756978da091241f7b8261348a2d646ee2345b09"} Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.807223 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.807207 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5b95cfcf9c-8rmgm" event={"ID":"c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a","Type":"ContainerDied","Data":"c2a1cecb78f1516e85844623a3ee2d2649df62fde62c23d39436ccc8dc69c1be"} Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.816786 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-k75fh" event={"ID":"b5651530-e21c-44f7-9e9c-3187b4fcd3c6","Type":"ContainerDied","Data":"36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488"} Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.816827 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36931a83e1600c47448eb32f2c5f48eb06578d768d5ed843391aad62ccf3e488" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.816795 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-k75fh" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.843692 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.846544 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5b95cfcf9c-8rmgm"] Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.849983 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.850072 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.904722 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-5947459fbf-s5xqj"] Nov 22 09:18:13 crc kubenswrapper[4693]: E1122 09:18:13.905585 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="dnsmasq-dns" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.906573 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="dnsmasq-dns" Nov 22 09:18:13 crc kubenswrapper[4693]: E1122 09:18:13.906717 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5651530-e21c-44f7-9e9c-3187b4fcd3c6" containerName="keystone-bootstrap" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.906796 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5651530-e21c-44f7-9e9c-3187b4fcd3c6" containerName="keystone-bootstrap" Nov 22 09:18:13 crc kubenswrapper[4693]: E1122 09:18:13.907194 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="init" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.907378 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="init" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.907950 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" containerName="dnsmasq-dns" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.908071 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5651530-e21c-44f7-9e9c-3187b4fcd3c6" containerName="keystone-bootstrap" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 
09:18:13.909030 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.914284 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.914542 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.915080 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-k575m" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.915272 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.915328 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.915291 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Nov 22 09:18:13 crc kubenswrapper[4693]: I1122 09:18:13.923731 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5947459fbf-s5xqj"] Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028621 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-config-data\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028680 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-public-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028718 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-combined-ca-bundle\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028790 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-fernet-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028808 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-internal-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.028826 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-scripts\") 
pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.029422 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-credential-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.029617 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jjdp\" (UniqueName: \"kubernetes.io/projected/5d2c816b-f30a-4a96-8f72-5f023d95e3be-kube-api-access-4jjdp\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132274 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-fernet-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132323 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-internal-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132344 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-scripts\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132405 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-credential-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132462 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jjdp\" (UniqueName: \"kubernetes.io/projected/5d2c816b-f30a-4a96-8f72-5f023d95e3be-kube-api-access-4jjdp\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132507 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-config-data\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132531 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-public-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: 
\"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.132551 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-combined-ca-bundle\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.137826 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-config-data\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.141421 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-internal-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.147305 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-combined-ca-bundle\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.147355 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-credential-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.147954 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-fernet-keys\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.153305 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-public-tls-certs\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.158074 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jjdp\" (UniqueName: \"kubernetes.io/projected/5d2c816b-f30a-4a96-8f72-5f023d95e3be-kube-api-access-4jjdp\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.159971 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5d2c816b-f30a-4a96-8f72-5f023d95e3be-scripts\") pod \"keystone-5947459fbf-s5xqj\" (UID: \"5d2c816b-f30a-4a96-8f72-5f023d95e3be\") " pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.165413 4693 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a" path="/var/lib/kubelet/pods/c7a9ff23-7ce2-4d2a-99aa-1447141b7f4a/volumes" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.228308 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.791022 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-6875cccfb6-frgx5"] Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.793399 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.795347 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.795401 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.808355 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6875cccfb6-frgx5"] Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858696 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ls7c8\" (UniqueName: \"kubernetes.io/projected/01894220-3ce0-4535-9d19-95f573987428-kube-api-access-ls7c8\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858745 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/01894220-3ce0-4535-9d19-95f573987428-logs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858816 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-combined-ca-bundle\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858860 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-internal-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858882 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858959 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-public-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" 
(UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.858989 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data-custom\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.861980 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960577 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-public-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960671 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data-custom\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960734 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ls7c8\" (UniqueName: \"kubernetes.io/projected/01894220-3ce0-4535-9d19-95f573987428-kube-api-access-ls7c8\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960753 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/01894220-3ce0-4535-9d19-95f573987428-logs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960905 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-combined-ca-bundle\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960964 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-internal-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.960989 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.964929 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/01894220-3ce0-4535-9d19-95f573987428-logs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.966305 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-internal-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.970180 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-public-tls-certs\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.970750 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-combined-ca-bundle\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.973689 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data-custom\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.975095 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ls7c8\" (UniqueName: \"kubernetes.io/projected/01894220-3ce0-4535-9d19-95f573987428-kube-api-access-ls7c8\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:14 crc kubenswrapper[4693]: I1122 09:18:14.979337 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/01894220-3ce0-4535-9d19-95f573987428-config-data\") pod \"barbican-api-6875cccfb6-frgx5\" (UID: \"01894220-3ce0-4535-9d19-95f573987428\") " pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:15 crc kubenswrapper[4693]: I1122 09:18:15.123118 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:15 crc kubenswrapper[4693]: I1122 09:18:15.136495 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:18:16 crc kubenswrapper[4693]: W1122 09:18:16.110048 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2aa955a9_5d2e_45f4_bd28_5ca96ff1e172.slice/crio-240f24ec4317c3db2330b635ff80ddf23d9fccbd7907590fbd86a0bd73f86fc9 WatchSource:0}: Error finding container 240f24ec4317c3db2330b635ff80ddf23d9fccbd7907590fbd86a0bd73f86fc9: Status 404 returned error can't find the container with id 240f24ec4317c3db2330b635ff80ddf23d9fccbd7907590fbd86a0bd73f86fc9 Nov 22 09:18:16 crc kubenswrapper[4693]: I1122 09:18:16.848835 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerStarted","Data":"240f24ec4317c3db2330b635ff80ddf23d9fccbd7907590fbd86a0bd73f86fc9"} Nov 22 09:18:16 crc kubenswrapper[4693]: I1122 09:18:16.979624 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:18:18 crc kubenswrapper[4693]: W1122 09:18:18.382738 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1cd57141_9a7a_4b43_aa1e_c1fcbd6b4c40.slice/crio-e3909f7d8b64ece964f046fa0d401d04b9150c8fecc998a9425046a2821784b2 WatchSource:0}: Error finding container e3909f7d8b64ece964f046fa0d401d04b9150c8fecc998a9425046a2821784b2: Status 404 returned error can't find the container with id e3909f7d8b64ece964f046fa0d401d04b9150c8fecc998a9425046a2821784b2 Nov 22 09:18:18 crc kubenswrapper[4693]: W1122 09:18:18.391657 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcfcddea0_5e6c_4ae4_b8af_d4438dc61861.slice/crio-60cc3c35ca133782a1e2c533724236efbe3f75a2d3b576b0d04ffdc0ac04519d WatchSource:0}: Error finding container 60cc3c35ca133782a1e2c533724236efbe3f75a2d3b576b0d04ffdc0ac04519d: Status 404 returned error can't find the container with id 60cc3c35ca133782a1e2c533724236efbe3f75a2d3b576b0d04ffdc0ac04519d Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.888994 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerStarted","Data":"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.891422 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-9f6899d66-t5sml" event={"ID":"5b49963a-32e1-4500-969a-b7feaa78d4d3","Type":"ContainerStarted","Data":"089e100c668c7355a3f0c35c698ff3e71239bf792ff1367438f30517f8604ea8"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.891581 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.892085 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.895298 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/placement-9f6899d66-t5sml" podUID="5b49963a-32e1-4500-969a-b7feaa78d4d3" containerName="placement-log" 
probeResult="failure" output="Get \"https://10.217.0.154:8778/\": dial tcp 10.217.0.154:8778: connect: connection refused" Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.899050 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerStarted","Data":"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.904037 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" event={"ID":"2ea61845-fe45-4eed-a854-92545e309870","Type":"ContainerStarted","Data":"22210a2bd4aa1b5c10fd7d5f2f13a0453a01a2ce9416cc2ef68e3789f5aa5096"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.907047 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerStarted","Data":"3facd3da1e0cab358a7feb42dbe7a5ac98eb880da1116a99eade23eba7a9d2d0"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.907085 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerStarted","Data":"60cc3c35ca133782a1e2c533724236efbe3f75a2d3b576b0d04ffdc0ac04519d"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.908451 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f9b77d547-xf85h" event={"ID":"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40","Type":"ContainerStarted","Data":"e3909f7d8b64ece964f046fa0d401d04b9150c8fecc998a9425046a2821784b2"} Nov 22 09:18:18 crc kubenswrapper[4693]: I1122 09:18:18.941768 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-9f6899d66-t5sml" podStartSLOduration=7.941748669 podStartE2EDuration="7.941748669s" podCreationTimestamp="2025-11-22 09:18:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:18.91413852 +0000 UTC m=+895.056640812" watchObservedRunningTime="2025-11-22 09:18:18.941748669 +0000 UTC m=+895.084251001" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.009925 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-6875cccfb6-frgx5"] Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.223745 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-5947459fbf-s5xqj"] Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.940567 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5947459fbf-s5xqj" event={"ID":"5d2c816b-f30a-4a96-8f72-5f023d95e3be","Type":"ContainerStarted","Data":"c54f8aeb3dc7148e8e3461585cad6b681a44d293f1b7eaafc79f5eaaaf162e53"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.941030 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-5947459fbf-s5xqj" event={"ID":"5d2c816b-f30a-4a96-8f72-5f023d95e3be","Type":"ContainerStarted","Data":"5b62ff3512d3ae7883ff5a3aa06e5511cc4175b3cea2de2e3f3dfc9497003eae"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.941063 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.952429 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/barbican-api-6875cccfb6-frgx5" event={"ID":"01894220-3ce0-4535-9d19-95f573987428","Type":"ContainerStarted","Data":"8ec7d2dff2b4b801d406c71498914d83b7117c0a82a94318f907bf88b14d2856"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.952493 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6875cccfb6-frgx5" event={"ID":"01894220-3ce0-4535-9d19-95f573987428","Type":"ContainerStarted","Data":"9db8f77b759ba1e0c9178a6b904609802b2286c6d3754dac8b8e54b86cd2b7e5"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.952506 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-6875cccfb6-frgx5" event={"ID":"01894220-3ce0-4535-9d19-95f573987428","Type":"ContainerStarted","Data":"8619aa31bd7d0a6743e3bd93698f2e971634bedce6878c4ea68abc0ce9abd447"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.954196 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.954237 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.959721 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-5947459fbf-s5xqj" podStartSLOduration=6.95970381 podStartE2EDuration="6.95970381s" podCreationTimestamp="2025-11-22 09:18:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:19.956825346 +0000 UTC m=+896.099327637" watchObservedRunningTime="2025-11-22 09:18:19.95970381 +0000 UTC m=+896.102206101" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.968517 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerStarted","Data":"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.969348 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.969383 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.971661 4693 generic.go:334] "Generic (PLEG): container finished" podID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerID="3facd3da1e0cab358a7feb42dbe7a5ac98eb880da1116a99eade23eba7a9d2d0" exitCode=0 Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.978117 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerDied","Data":"3facd3da1e0cab358a7feb42dbe7a5ac98eb880da1116a99eade23eba7a9d2d0"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.978158 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerStarted","Data":"8078cfe99d3fc165ed1069c6f1a6387c434c9b60a901d9e2d0800abe092f7983"} Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.978645 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-6875cccfb6-frgx5" podStartSLOduration=5.978629665 podStartE2EDuration="5.978629665s" 
podCreationTimestamp="2025-11-22 09:18:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:19.974210623 +0000 UTC m=+896.116712915" watchObservedRunningTime="2025-11-22 09:18:19.978629665 +0000 UTC m=+896.121131955" Nov 22 09:18:19 crc kubenswrapper[4693]: I1122 09:18:19.993045 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-854d765ddd-vqvhg" podStartSLOduration=7.993031161 podStartE2EDuration="7.993031161s" podCreationTimestamp="2025-11-22 09:18:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:19.990136015 +0000 UTC m=+896.132638296" watchObservedRunningTime="2025-11-22 09:18:19.993031161 +0000 UTC m=+896.135533452" Nov 22 09:18:20 crc kubenswrapper[4693]: I1122 09:18:20.011662 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" podStartSLOduration=8.011643606 podStartE2EDuration="8.011643606s" podCreationTimestamp="2025-11-22 09:18:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:20.004502937 +0000 UTC m=+896.147005228" watchObservedRunningTime="2025-11-22 09:18:20.011643606 +0000 UTC m=+896.154145897" Nov 22 09:18:20 crc kubenswrapper[4693]: I1122 09:18:20.985629 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" event={"ID":"2ea61845-fe45-4eed-a854-92545e309870","Type":"ContainerStarted","Data":"d2390e2161692ff8e7fcfc9e731b1deb90166ae383d24eb835d003fc4f68706a"} Nov 22 09:18:20 crc kubenswrapper[4693]: I1122 09:18:20.987866 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f9b77d547-xf85h" event={"ID":"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40","Type":"ContainerStarted","Data":"73322b6eca487bdf13b2a012229578e25b1a771f51ae0c7aaadb047cc27f17a3"} Nov 22 09:18:20 crc kubenswrapper[4693]: I1122 09:18:20.989094 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:21 crc kubenswrapper[4693]: I1122 09:18:21.045124 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.006468 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qjqt5" event={"ID":"be72ccb6-821d-4b5a-a4d0-6d866fc617f6","Type":"ContainerStarted","Data":"b2e002a07cf5bf2d7444ec4de3b8a5bf0e08b258e98f53ba30a9e12168868073"} Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.008562 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" event={"ID":"2ea61845-fe45-4eed-a854-92545e309870","Type":"ContainerStarted","Data":"565163eeac2a49a7509def5bc3b9435a07a7c0c60d93217d50833f4e8ed1e375"} Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.010864 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-6f9b77d547-xf85h" event={"ID":"1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40","Type":"ContainerStarted","Data":"3190499692ba50bf34cf793f8ae7a021b0eb20502f19053272379b901d5890fb"} Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.022620 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/cinder-db-sync-qjqt5" podStartSLOduration=3.311246231 podStartE2EDuration="38.022604955s" podCreationTimestamp="2025-11-22 09:17:44 +0000 UTC" firstStartedPulling="2025-11-22 09:17:45.936973802 +0000 UTC m=+862.079476093" lastFinishedPulling="2025-11-22 09:18:20.648332525 +0000 UTC m=+896.790834817" observedRunningTime="2025-11-22 09:18:22.019755164 +0000 UTC m=+898.162257455" watchObservedRunningTime="2025-11-22 09:18:22.022604955 +0000 UTC m=+898.165107246" Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.041803 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-64777f45c8-bwpgz" podStartSLOduration=7.9352646159999995 podStartE2EDuration="10.041787632s" podCreationTimestamp="2025-11-22 09:18:12 +0000 UTC" firstStartedPulling="2025-11-22 09:18:18.429594389 +0000 UTC m=+894.572096681" lastFinishedPulling="2025-11-22 09:18:20.536117406 +0000 UTC m=+896.678619697" observedRunningTime="2025-11-22 09:18:22.033833722 +0000 UTC m=+898.176336014" watchObservedRunningTime="2025-11-22 09:18:22.041787632 +0000 UTC m=+898.184289923" Nov 22 09:18:22 crc kubenswrapper[4693]: I1122 09:18:22.061113 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-6f9b77d547-xf85h" podStartSLOduration=7.915715408 podStartE2EDuration="10.061097009s" podCreationTimestamp="2025-11-22 09:18:12 +0000 UTC" firstStartedPulling="2025-11-22 09:18:18.385879011 +0000 UTC m=+894.528381302" lastFinishedPulling="2025-11-22 09:18:20.531260612 +0000 UTC m=+896.673762903" observedRunningTime="2025-11-22 09:18:22.05641911 +0000 UTC m=+898.198921402" watchObservedRunningTime="2025-11-22 09:18:22.061097009 +0000 UTC m=+898.203599300" Nov 22 09:18:23 crc kubenswrapper[4693]: I1122 09:18:23.696745 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 22 09:18:23 crc kubenswrapper[4693]: I1122 09:18:23.851642 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-7fd67558f8-nzmxr" podUID="dd3b9780-060c-4788-9800-20c1ac3b2e95" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.147:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.147:8443: connect: connection refused" Nov 22 09:18:25 crc kubenswrapper[4693]: I1122 09:18:25.069393 4693 generic.go:334] "Generic (PLEG): container finished" podID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" containerID="b2e002a07cf5bf2d7444ec4de3b8a5bf0e08b258e98f53ba30a9e12168868073" exitCode=0 Nov 22 09:18:25 crc kubenswrapper[4693]: I1122 09:18:25.070673 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qjqt5" event={"ID":"be72ccb6-821d-4b5a-a4d0-6d866fc617f6","Type":"ContainerDied","Data":"b2e002a07cf5bf2d7444ec4de3b8a5bf0e08b258e98f53ba30a9e12168868073"} Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.550247 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.694568 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-6875cccfb6-frgx5" Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.762888 4693 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.763076 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" containerID="cri-o://4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36" gracePeriod=30 Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.763203 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" containerID="cri-o://72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8" gracePeriod=30 Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.774820 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF" Nov 22 09:18:26 crc kubenswrapper[4693]: I1122 09:18:26.774909 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": EOF" Nov 22 09:18:27 crc kubenswrapper[4693]: I1122 09:18:27.096098 4693 generic.go:334] "Generic (PLEG): container finished" podID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerID="4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36" exitCode=143 Nov 22 09:18:27 crc kubenswrapper[4693]: I1122 09:18:27.096360 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerDied","Data":"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36"} Nov 22 09:18:27 crc kubenswrapper[4693]: I1122 09:18:27.768977 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:27 crc kubenswrapper[4693]: I1122 09:18:27.821939 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"] Nov 22 09:18:27 crc kubenswrapper[4693]: I1122 09:18:27.822942 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-798745f775-l9pkd" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="dnsmasq-dns" containerID="cri-o://129172c7678c6e9585e9c7b29a6bb271ef2b812be58f7843dc66eedfd7309456" gracePeriod=10 Nov 22 09:18:28 crc kubenswrapper[4693]: I1122 09:18:28.107607 4693 generic.go:334] "Generic (PLEG): container finished" podID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerID="129172c7678c6e9585e9c7b29a6bb271ef2b812be58f7843dc66eedfd7309456" exitCode=0 Nov 22 09:18:28 crc kubenswrapper[4693]: I1122 09:18:28.107664 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-l9pkd" event={"ID":"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6","Type":"ContainerDied","Data":"129172c7678c6e9585e9c7b29a6bb271ef2b812be58f7843dc66eedfd7309456"} Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.362971 4693 util.go:48] "No ready sandbox for pod can be found. 
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403170 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403467 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403518 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg4fm\" (UniqueName: \"kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403584 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403614 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.403630 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data\") pod \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\" (UID: \"be72ccb6-821d-4b5a-a4d0-6d866fc617f6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.408352 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.408771 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.415191 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm" (OuterVolumeSpecName: "kube-api-access-qg4fm") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "kube-api-access-qg4fm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.426043 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts" (OuterVolumeSpecName: "scripts") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.456814 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: E1122 09:18:29.467260 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.470392 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-l9pkd"
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.475433 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data" (OuterVolumeSpecName: "config-data") pod "be72ccb6-821d-4b5a-a4d0-6d866fc617f6" (UID: "be72ccb6-821d-4b5a-a4d0-6d866fc617f6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505005 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-knn99\" (UniqueName: \"kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505051 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505072 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505101 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505121 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505149 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb\") pod \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\" (UID: \"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6\") "
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505512 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg4fm\" (UniqueName: \"kubernetes.io/projected/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-kube-api-access-qg4fm\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505530 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505539 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505549 4693 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505556 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.505565 4693 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/be72ccb6-821d-4b5a-a4d0-6d866fc617f6-etc-machine-id\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.515873 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99" (OuterVolumeSpecName: "kube-api-access-knn99") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "kube-api-access-knn99". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.539673 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config" (OuterVolumeSpecName: "config") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.540235 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.540985 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.547571 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.550296 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" (UID: "5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606661 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606687 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-knn99\" (UniqueName: \"kubernetes.io/projected/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-kube-api-access-knn99\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606698 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-config\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606709 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-svc\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606718 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:29 crc kubenswrapper[4693]: I1122 09:18:29.606726 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.132893 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerStarted","Data":"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6"}
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.133006 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="ceilometer-notification-agent" containerID="cri-o://636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd" gracePeriod=30
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.133073 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="proxy-httpd" containerID="cri-o://75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6" gracePeriod=30
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.133122 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="sg-core" containerID="cri-o://bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7" gracePeriod=30
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.133178 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.134350 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-qjqt5" event={"ID":"be72ccb6-821d-4b5a-a4d0-6d866fc617f6","Type":"ContainerDied","Data":"00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa"}
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.134378 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="00f1c73ba54ff544e2bbbe6971b28a10af3a86bf9a9a38948b241c43b88365fa"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.134429 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-qjqt5"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.138294 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-798745f775-l9pkd" event={"ID":"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6","Type":"ContainerDied","Data":"a02751ac6e7446db3b755770c00c3c26236d34605f3ccff905b8ba6679460fa9"}
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.138340 4693 scope.go:117] "RemoveContainer" containerID="129172c7678c6e9585e9c7b29a6bb271ef2b812be58f7843dc66eedfd7309456"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.138407 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-798745f775-l9pkd"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.158944 4693 scope.go:117] "RemoveContainer" containerID="ab5b2db282e5b8a4e86e7ad05df8c13cf4b68c11b7848d559a1069d101f9975a"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.179983 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"]
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.185582 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-798745f775-l9pkd"]
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.247163 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.247486 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.706913 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"]
Nov 22 09:18:30 crc kubenswrapper[4693]: E1122 09:18:30.707196 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" containerName="cinder-db-sync"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.707208 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" containerName="cinder-db-sync"
Nov 22 09:18:30 crc kubenswrapper[4693]: E1122 09:18:30.707230 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="init"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.707236 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="init"
Nov 22 09:18:30 crc kubenswrapper[4693]: E1122 09:18:30.707255 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="dnsmasq-dns"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.707260 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="dnsmasq-dns"
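Before admitting the newly added dnsmasq-dns-7965876c4f-d2bqt pod, the cpu_manager and memory_manager entries above drop pinning state left behind by containers of removed pods (cinder-db-sync, init, dnsmasq-dns). A sketch of that RemoveStaleState bookkeeping, assuming a simple map keyed by pod UID and container name rather than kubelet's actual state types:

    package main

    import "fmt"

    type key struct{ podUID, container string }

    // removeStaleState deletes assignments for pods no longer known to the
    // pod manager, mirroring "RemoveStaleState: removing container" followed
    // by "Deleted CPUSet assignment" in the log above.
    func removeStaleState(assignments map[key]string, active map[string]bool) {
    	for k := range assignments {
    		if !active[k.podUID] {
    			fmt.Printf("RemoveStaleState: removing container %s/%s\n", k.podUID, k.container)
    			delete(assignments, k) // "Deleted CPUSet assignment"
    		}
    	}
    }

    func main() {
    	assignments := map[key]string{
    		{"be72ccb6-821d-4b5a-a4d0-6d866fc617f6", "cinder-db-sync"}: "0-3",
    		{"5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6", "dnsmasq-dns"}:    "0-3",
    	}
    	// Neither pod is active any more, so both entries are removed.
    	removeStaleState(assignments, map[string]bool{})
    }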
podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="dnsmasq-dns" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.707403 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" containerName="cinder-db-sync" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.707415 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" containerName="dnsmasq-dns" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.708159 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.714377 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.715633 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.721289 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-nt99g" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.721493 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.721614 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.731166 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"] Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.731804 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.747570 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858466 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858503 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858554 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zml9f\" (UniqueName: \"kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858587 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " 
pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858604 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858621 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858641 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858662 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858682 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858711 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jm8l\" (UniqueName: \"kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858743 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.858777 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.860582 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.862324 4693 util.go:30] "No sandbox for pod can be found. 
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.864500 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.882877 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.959993 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jm8l\" (UniqueName: \"kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.960055 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.960080 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.960113 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.960145 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwz64\" (UniqueName: \"kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.960955 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961017 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961039 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961391 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961414 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961433 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zml9f\" (UniqueName: \"kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961460 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961477 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961495 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961508 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961525 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961543 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961563 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961579 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961603 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.961814 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.962364 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.962878 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.963253 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.963422 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.966684 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.969215 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.969897 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0"
volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.981582 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zml9f\" (UniqueName: \"kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.985411 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jm8l\" (UniqueName: \"kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l\") pod \"dnsmasq-dns-7965876c4f-d2bqt\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:30 crc kubenswrapper[4693]: I1122 09:18:30.996295 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts\") pod \"cinder-scheduler-0\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.035445 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.042478 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065036 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065136 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065283 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065334 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065445 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065542 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065692 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwz64\" (UniqueName: \"kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065717 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.065877 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.069614 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.070200 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.074604 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.079448 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.081421 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwz64\" (UniqueName: \"kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64\") pod \"cinder-api-0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " pod="openstack/cinder-api-0" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.096425 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.185363 4693 generic.go:334] "Generic (PLEG): container finished" podID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerID="75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6" exitCode=0 Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.185409 
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.185464 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerDied","Data":"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6"}
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.185499 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerDied","Data":"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7"}
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.213138 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.647884 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"]
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.657142 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.747019 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.815377 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929582 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929703 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929810 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qqfwf\" (UniqueName: \"kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929861 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929881 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929926 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.929979 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml\") pod \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\" (UID: \"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d\") "
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.930945 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.932184 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.935988 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf" (OuterVolumeSpecName: "kube-api-access-qqfwf") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "kube-api-access-qqfwf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.936025 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts" (OuterVolumeSpecName: "scripts") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.961357 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:18:31 crc kubenswrapper[4693]: I1122 09:18:31.982933 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.032937 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.032971 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qqfwf\" (UniqueName: \"kubernetes.io/projected/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-kube-api-access-qqfwf\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.032984 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.032992 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.033000 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.033009 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.042898 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data" (OuterVolumeSpecName: "config-data") pod "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" (UID: "079d82bf-3312-4f7f-8ef8-6d55b20c8f5d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.135537 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.160636 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6" path="/var/lib/kubelet/pods/5340fdb0-bc27-4861-bbba-f5d0ccd1e4a6/volumes" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.214222 4693 generic.go:334] "Generic (PLEG): container finished" podID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerID="774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f" exitCode=0 Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.214290 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" event={"ID":"8187f5e4-a62d-444b-99d8-694e067cb8f4","Type":"ContainerDied","Data":"774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.214314 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" event={"ID":"8187f5e4-a62d-444b-99d8-694e067cb8f4","Type":"ContainerStarted","Data":"e68fcd8f44a258f87fd1f60db2c0e6b6d035175373502543a802dba50edc2849"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.214438 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:34798->10.217.0.158:9311: read: connection reset by peer" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.214828 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-854d765ddd-vqvhg" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": read tcp 10.217.0.2:34794->10.217.0.158:9311: read: connection reset by peer" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.221521 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerStarted","Data":"fc320868d59998df50348d6d79646277c7cd7dc57f89670415924246697471cb"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.228456 4693 generic.go:334] "Generic (PLEG): container finished" podID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerID="636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd" exitCode=0 Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.228533 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerDied","Data":"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.228552 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"079d82bf-3312-4f7f-8ef8-6d55b20c8f5d","Type":"ContainerDied","Data":"38fc368c6834eaa71626b74a4acf077e8c8881fea8ee04f37776ac1fb11a3ae2"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.228631 4693 scope.go:117] "RemoveContainer" 
containerID="75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.228796 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.253765 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerStarted","Data":"1a9c590ec0a5e8df582c303591fe3adb2b0a947f9e943df60f39c2685c1a9b88"} Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.384187 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.397601 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.431871 4693 scope.go:117] "RemoveContainer" containerID="bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.526279 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.543621 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.544755 4693 scope.go:117] "RemoveContainer" containerID="636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.548660 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.549249 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="sg-core" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549269 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="sg-core" Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.549292 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="ceilometer-notification-agent" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549300 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="ceilometer-notification-agent" Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.549318 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="proxy-httpd" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549338 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="proxy-httpd" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549535 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="sg-core" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549564 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="ceilometer-notification-agent" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.549578 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" containerName="proxy-httpd" Nov 22 09:18:32 crc 
kubenswrapper[4693]: I1122 09:18:32.551534 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.554115 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.554453 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.554617 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568152 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568190 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568239 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568268 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568288 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568314 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnfrs\" (UniqueName: \"kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.568383 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.599950 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.623955 4693 scope.go:117] "RemoveContainer" 
containerID="75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6" Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.627462 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6\": container with ID starting with 75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6 not found: ID does not exist" containerID="75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.627501 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6"} err="failed to get container status \"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6\": rpc error: code = NotFound desc = could not find container \"75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6\": container with ID starting with 75b67d0da81f6d856a96920746eb0c36df4b4f9c7c1f4cb27afc8b34b787c3e6 not found: ID does not exist" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.627529 4693 scope.go:117] "RemoveContainer" containerID="bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7" Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.631114 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7\": container with ID starting with bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7 not found: ID does not exist" containerID="bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.631144 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7"} err="failed to get container status \"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7\": rpc error: code = NotFound desc = could not find container \"bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7\": container with ID starting with bc3363173e472c5242cded0759f762a930927713974d16ea0231f7b149f2e5b7 not found: ID does not exist" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.631163 4693 scope.go:117] "RemoveContainer" containerID="636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd" Nov 22 09:18:32 crc kubenswrapper[4693]: E1122 09:18:32.634451 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd\": container with ID starting with 636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd not found: ID does not exist" containerID="636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.634491 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd"} err="failed to get container status \"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd\": rpc error: code = NotFound desc = could not find container \"636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd\": container with ID starting with 
636cbae9d73b5a99895c17d3a8c7f9f3c3ea46a8c39f1012bc138fa36e3b4ebd not found: ID does not exist" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670031 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670069 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670110 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670138 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670159 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670189 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnfrs\" (UniqueName: \"kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670257 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.670437 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.671554 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.674010 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: 
\"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.679762 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.683740 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.685904 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.711952 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.722466 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnfrs\" (UniqueName: \"kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs\") pod \"ceilometer-0\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.742534 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.883241 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle\") pod \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.884514 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jwc5l\" (UniqueName: \"kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l\") pod \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.884665 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs\") pod \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.884825 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom\") pod \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\" (UID: \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.885274 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data\") pod \"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\" (UID: 
\"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172\") " Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.894556 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs" (OuterVolumeSpecName: "logs") pod "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" (UID: "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.894759 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l" (OuterVolumeSpecName: "kube-api-access-jwc5l") pod "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" (UID: "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172"). InnerVolumeSpecName "kube-api-access-jwc5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.896417 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.900039 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" (UID: "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.941974 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" (UID: "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.989313 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.989344 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jwc5l\" (UniqueName: \"kubernetes.io/projected/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-kube-api-access-jwc5l\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.989355 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.989363 4693 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:32 crc kubenswrapper[4693]: I1122 09:18:32.990677 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data" (OuterVolumeSpecName: "config-data") pod "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" (UID: "2aa955a9-5d2e-45f4-bd28-5ca96ff1e172"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.091284 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.120541 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7fbc84ccfc-8tdp6" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.182901 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"] Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.183503 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f8cbc66c6-d5d77" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-api" containerID="cri-o://a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8" gracePeriod=30 Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.183685 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-5f8cbc66c6-d5d77" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-httpd" containerID="cri-o://8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad" gracePeriod=30 Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.290677 4693 generic.go:334] "Generic (PLEG): container finished" podID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerID="72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8" exitCode=0 Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.290875 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerDied","Data":"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8"} Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.290977 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-854d765ddd-vqvhg" event={"ID":"2aa955a9-5d2e-45f4-bd28-5ca96ff1e172","Type":"ContainerDied","Data":"240f24ec4317c3db2330b635ff80ddf23d9fccbd7907590fbd86a0bd73f86fc9"} Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.291007 4693 scope.go:117] "RemoveContainer" containerID="72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.292349 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-854d765ddd-vqvhg" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.302209 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" event={"ID":"8187f5e4-a62d-444b-99d8-694e067cb8f4","Type":"ContainerStarted","Data":"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6"} Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.302809 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.329856 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerStarted","Data":"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0"} Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.348491 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" podStartSLOduration=3.348473381 podStartE2EDuration="3.348473381s" podCreationTimestamp="2025-11-22 09:18:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:33.342876915 +0000 UTC m=+909.485379206" watchObservedRunningTime="2025-11-22 09:18:33.348473381 +0000 UTC m=+909.490975672" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.360124 4693 scope.go:117] "RemoveContainer" containerID="4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.375614 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.376220 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-854d765ddd-vqvhg"] Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.404866 4693 scope.go:117] "RemoveContainer" containerID="72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8" Nov 22 09:18:33 crc kubenswrapper[4693]: E1122 09:18:33.405624 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8\": container with ID starting with 72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8 not found: ID does not exist" containerID="72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.405671 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8"} err="failed to get container status \"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8\": rpc error: code = NotFound desc = could not find container \"72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8\": container with ID starting with 72b17c7b7a324af8b3136ce6e8005b6f2c2b0cf8191b1bd03e88330c1ff720a8 not found: ID does not exist" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.405705 4693 scope.go:117] "RemoveContainer" containerID="4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36" Nov 22 09:18:33 crc kubenswrapper[4693]: E1122 09:18:33.406241 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36\": container with ID starting with 4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36 not found: ID does not exist" containerID="4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.406291 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36"} err="failed to get container status \"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36\": rpc error: code = NotFound desc = could not find container \"4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36\": container with ID starting with 4320bdd36f399481e16c0fa32626024b42aae232b8fc809884e6278850ca4b36 not found: ID does not exist" Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.409761 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:33 crc kubenswrapper[4693]: I1122 09:18:33.433939 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.163341 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="079d82bf-3312-4f7f-8ef8-6d55b20c8f5d" path="/var/lib/kubelet/pods/079d82bf-3312-4f7f-8ef8-6d55b20c8f5d/volumes" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.164693 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" path="/var/lib/kubelet/pods/2aa955a9-5d2e-45f4-bd28-5ca96ff1e172/volumes" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.340208 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api-log" containerID="cri-o://c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" gracePeriod=30 Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.340514 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerStarted","Data":"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.340568 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.340864 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api" containerID="cri-o://113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" gracePeriod=30 Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.353659 4693 generic.go:334] "Generic (PLEG): container finished" podID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerID="8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad" exitCode=0 Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.353712 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerDied","Data":"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.366809 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerStarted","Data":"99fcc5431e24b22d409844bd2d38a9737cb2b98ac5503ccc95ebe32368a1f934"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.366855 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerStarted","Data":"38e1d3bc7bc5a77353238d3c6c503c80f529dc66c1dfc7aec4b59ba2c6960e37"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.369086 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.369070621 podStartE2EDuration="4.369070621s" podCreationTimestamp="2025-11-22 09:18:30 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:34.364673571 +0000 UTC m=+910.507175862" watchObservedRunningTime="2025-11-22 09:18:34.369070621 +0000 UTC m=+910.511572912" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.375035 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerStarted","Data":"da291a940212e51760a6510d87012fd7883b06ac24252cef062ed4da78eec894"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.375089 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerStarted","Data":"ff22b15971f3f675dd812e2f7f4ce2c43d946d27f53a5eae904e85e4ab6c19da"} Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.862710 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 09:18:34 crc kubenswrapper[4693]: I1122 09:18:34.885741 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.6812298759999997 podStartE2EDuration="4.885714594s" podCreationTimestamp="2025-11-22 09:18:30 +0000 UTC" firstStartedPulling="2025-11-22 09:18:31.660441715 +0000 UTC m=+907.802944007" lastFinishedPulling="2025-11-22 09:18:32.864926435 +0000 UTC m=+909.007428725" observedRunningTime="2025-11-22 09:18:34.382892777 +0000 UTC m=+910.525395068" watchObservedRunningTime="2025-11-22 09:18:34.885714594 +0000 UTC m=+911.028216886" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055240 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055347 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055411 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055454 4693 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055495 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwz64\" (UniqueName: \"kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055520 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.055552 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom\") pod \"0add0517-2e1f-4195-ada0-7f414d54afa0\" (UID: \"0add0517-2e1f-4195-ada0-7f414d54afa0\") " Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.056003 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs" (OuterVolumeSpecName: "logs") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.056648 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0add0517-2e1f-4195-ada0-7f414d54afa0-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.056750 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.073707 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64" (OuterVolumeSpecName: "kube-api-access-dwz64") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "kube-api-access-dwz64". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.073863 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "config-data-custom". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.079813 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts" (OuterVolumeSpecName: "scripts") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.084894 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.104649 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data" (OuterVolumeSpecName: "config-data") pod "0add0517-2e1f-4195-ada0-7f414d54afa0" (UID: "0add0517-2e1f-4195-ada0-7f414d54afa0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158452 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158678 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158691 4693 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0add0517-2e1f-4195-ada0-7f414d54afa0-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158699 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwz64\" (UniqueName: \"kubernetes.io/projected/0add0517-2e1f-4195-ada0-7f414d54afa0-kube-api-access-dwz64\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158707 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.158715 4693 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0add0517-2e1f-4195-ada0-7f414d54afa0-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.388869 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerStarted","Data":"c65da8152721e6d62dd7a5a2d4ccb755bd75ce02af2c4e5f93c220c8fb5e6c14"} Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400161 4693 generic.go:334] "Generic (PLEG): container finished" podID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerID="113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" exitCode=0 Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 
09:18:35.400194 4693 generic.go:334] "Generic (PLEG): container finished" podID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerID="c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" exitCode=143 Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400242 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerDied","Data":"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008"} Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400256 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400291 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerDied","Data":"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0"} Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400304 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0add0517-2e1f-4195-ada0-7f414d54afa0","Type":"ContainerDied","Data":"fc320868d59998df50348d6d79646277c7cd7dc57f89670415924246697471cb"} Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.400308 4693 scope.go:117] "RemoveContainer" containerID="113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.451661 4693 scope.go:117] "RemoveContainer" containerID="c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.455896 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.463578 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.499901 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.500478 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.500566 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api" Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.500629 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.500680 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.500736 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.500786 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.500858 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.500922 4693 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.501197 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.501272 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.501346 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2aa955a9-5d2e-45f4-bd28-5ca96ff1e172" containerName="barbican-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.501410 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" containerName="cinder-api-log" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.502424 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.507348 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.507563 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.507603 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.509786 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.528341 4693 scope.go:117] "RemoveContainer" containerID="113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.529000 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008\": container with ID starting with 113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008 not found: ID does not exist" containerID="113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529033 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008"} err="failed to get container status \"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008\": rpc error: code = NotFound desc = could not find container \"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008\": container with ID starting with 113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008 not found: ID does not exist" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529057 4693 scope.go:117] "RemoveContainer" containerID="c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" Nov 22 09:18:35 crc kubenswrapper[4693]: E1122 09:18:35.529319 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0\": container with ID starting with c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0 not found: ID does not exist" 
containerID="c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529351 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0"} err="failed to get container status \"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0\": rpc error: code = NotFound desc = could not find container \"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0\": container with ID starting with c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0 not found: ID does not exist" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529372 4693 scope.go:117] "RemoveContainer" containerID="113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529657 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008"} err="failed to get container status \"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008\": rpc error: code = NotFound desc = could not find container \"113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008\": container with ID starting with 113006441f56c0fda8783066724f7a0b478839938767553d30d0102da5a90008 not found: ID does not exist" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.529677 4693 scope.go:117] "RemoveContainer" containerID="c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.530805 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0"} err="failed to get container status \"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0\": rpc error: code = NotFound desc = could not find container \"c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0\": container with ID starting with c2941c8998941a1d9b16d6b4c18c87509f8ea7e65747815972e2e79145143ec0 not found: ID does not exist" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.577804 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.577879 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-scripts\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.577923 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578167 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/ab074cff-55c1-4cc2-ac57-05c7948418c0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578241 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578309 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8g2q4\" (UniqueName: \"kubernetes.io/projected/ab074cff-55c1-4cc2-ac57-05c7948418c0-kube-api-access-8g2q4\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578385 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab074cff-55c1-4cc2-ac57-05c7948418c0-logs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578409 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.578425 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.679992 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680051 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8g2q4\" (UniqueName: \"kubernetes.io/projected/ab074cff-55c1-4cc2-ac57-05c7948418c0-kube-api-access-8g2q4\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680091 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab074cff-55c1-4cc2-ac57-05c7948418c0-logs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680111 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 
09:18:35.680128 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680167 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680187 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-scripts\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680206 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680290 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab074cff-55c1-4cc2-ac57-05c7948418c0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680381 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ab074cff-55c1-4cc2-ac57-05c7948418c0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.680736 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ab074cff-55c1-4cc2-ac57-05c7948418c0-logs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.684975 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.685718 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-scripts\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.685823 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.686018 4693 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.686919 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-config-data-custom\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.687499 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ab074cff-55c1-4cc2-ac57-05c7948418c0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.696390 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8g2q4\" (UniqueName: \"kubernetes.io/projected/ab074cff-55c1-4cc2-ac57-05c7948418c0-kube-api-access-8g2q4\") pod \"cinder-api-0\" (UID: \"ab074cff-55c1-4cc2-ac57-05c7948418c0\") " pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.696682 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.827544 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Nov 22 09:18:35 crc kubenswrapper[4693]: I1122 09:18:35.832963 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:18:36 crc kubenswrapper[4693]: I1122 09:18:36.043643 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 09:18:36 crc kubenswrapper[4693]: I1122 09:18:36.159114 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0add0517-2e1f-4195-ada0-7f414d54afa0" path="/var/lib/kubelet/pods/0add0517-2e1f-4195-ada0-7f414d54afa0/volumes" Nov 22 09:18:36 crc kubenswrapper[4693]: I1122 09:18:36.253408 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Nov 22 09:18:36 crc kubenswrapper[4693]: I1122 09:18:36.420938 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ab074cff-55c1-4cc2-ac57-05c7948418c0","Type":"ContainerStarted","Data":"f762123cbd45711d09a7b24be188771cb31d2f51a6f114d04390ea74c759357a"} Nov 22 09:18:36 crc kubenswrapper[4693]: I1122 09:18:36.425212 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerStarted","Data":"9af482365a16ff5995fc63fc8a8ccf084fb07bcdb6ca237f7923d5b95b8eaac7"} Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.184547 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-67dc677d8b-gqzpn" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.433685 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerStarted","Data":"f21b31973fff4ae5e32f576c352f67ac8875ad86f1f3234a5b38a8d5eb4443bf"} Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.434037 4693 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.435497 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ab074cff-55c1-4cc2-ac57-05c7948418c0","Type":"ContainerStarted","Data":"6e320dabb8737463d219fc88856ee3b1ccbf55c264c761cfb52aadc0e98132bb"} Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.435550 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"ab074cff-55c1-4cc2-ac57-05c7948418c0","Type":"ContainerStarted","Data":"c3bc23b4f8ce05484c3bd0b2363f124952f7b21d1b3a9ec16d6aa346c23fd553"} Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.435752 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.455737 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.022564907 podStartE2EDuration="5.455720803s" podCreationTimestamp="2025-11-22 09:18:32 +0000 UTC" firstStartedPulling="2025-11-22 09:18:33.4348793 +0000 UTC m=+909.577381591" lastFinishedPulling="2025-11-22 09:18:36.868035196 +0000 UTC m=+913.010537487" observedRunningTime="2025-11-22 09:18:37.453211354 +0000 UTC m=+913.595713635" watchObservedRunningTime="2025-11-22 09:18:37.455720803 +0000 UTC m=+913.598223095" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.473018 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=2.473001073 podStartE2EDuration="2.473001073s" podCreationTimestamp="2025-11-22 09:18:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:37.469020206 +0000 UTC m=+913.611522497" watchObservedRunningTime="2025-11-22 09:18:37.473001073 +0000 UTC m=+913.615503364" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.489158 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-7fd67558f8-nzmxr" Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.549790 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"] Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.550037 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon-log" containerID="cri-o://8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82" gracePeriod=30 Nov 22 09:18:37 crc kubenswrapper[4693]: I1122 09:18:37.550440 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon" containerID="cri-o://fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30" gracePeriod=30 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.067713 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.188737 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key\") pod \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.189100 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs\") pod \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.189131 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts\") pod \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.189315 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data\") pod \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.189413 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8574h\" (UniqueName: \"kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h\") pod \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\" (UID: \"6acf2b70-484b-4cae-97e0-2a999ef7dcef\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.191488 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs" (OuterVolumeSpecName: "logs") pod "6acf2b70-484b-4cae-97e0-2a999ef7dcef" (UID: "6acf2b70-484b-4cae-97e0-2a999ef7dcef"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.196277 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h" (OuterVolumeSpecName: "kube-api-access-8574h") pod "6acf2b70-484b-4cae-97e0-2a999ef7dcef" (UID: "6acf2b70-484b-4cae-97e0-2a999ef7dcef"). InnerVolumeSpecName "kube-api-access-8574h". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.196308 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "6acf2b70-484b-4cae-97e0-2a999ef7dcef" (UID: "6acf2b70-484b-4cae-97e0-2a999ef7dcef"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.217035 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts" (OuterVolumeSpecName: "scripts") pod "6acf2b70-484b-4cae-97e0-2a999ef7dcef" (UID: "6acf2b70-484b-4cae-97e0-2a999ef7dcef"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.221823 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data" (OuterVolumeSpecName: "config-data") pod "6acf2b70-484b-4cae-97e0-2a999ef7dcef" (UID: "6acf2b70-484b-4cae-97e0-2a999ef7dcef"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.224658 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.261443 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.290921 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs\") pod \"e7353446-2b8c-48fc-8267-42b6f3ac0502\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.290981 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9pwk\" (UniqueName: \"kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk\") pod \"8cd0df95-6511-4220-8bdd-53ed795a0606\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291018 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key\") pod \"e7353446-2b8c-48fc-8267-42b6f3ac0502\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291137 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data\") pod \"e7353446-2b8c-48fc-8267-42b6f3ac0502\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291240 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs" (OuterVolumeSpecName: "logs") pod "e7353446-2b8c-48fc-8267-42b6f3ac0502" (UID: "e7353446-2b8c-48fc-8267-42b6f3ac0502"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291280 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q2kwg\" (UniqueName: \"kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg\") pod \"e7353446-2b8c-48fc-8267-42b6f3ac0502\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291336 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data\") pod \"8cd0df95-6511-4220-8bdd-53ed795a0606\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291389 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key\") pod \"8cd0df95-6511-4220-8bdd-53ed795a0606\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291425 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs\") pod \"8cd0df95-6511-4220-8bdd-53ed795a0606\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291524 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts\") pod \"8cd0df95-6511-4220-8bdd-53ed795a0606\" (UID: \"8cd0df95-6511-4220-8bdd-53ed795a0606\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.291578 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts\") pod \"e7353446-2b8c-48fc-8267-42b6f3ac0502\" (UID: \"e7353446-2b8c-48fc-8267-42b6f3ac0502\") " Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292069 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs" (OuterVolumeSpecName: "logs") pod "8cd0df95-6511-4220-8bdd-53ed795a0606" (UID: "8cd0df95-6511-4220-8bdd-53ed795a0606"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292145 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e7353446-2b8c-48fc-8267-42b6f3ac0502-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292160 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292174 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8574h\" (UniqueName: \"kubernetes.io/projected/6acf2b70-484b-4cae-97e0-2a999ef7dcef-kube-api-access-8574h\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292184 4693 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/6acf2b70-484b-4cae-97e0-2a999ef7dcef-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292193 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6acf2b70-484b-4cae-97e0-2a999ef7dcef-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.292202 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/6acf2b70-484b-4cae-97e0-2a999ef7dcef-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.296228 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "e7353446-2b8c-48fc-8267-42b6f3ac0502" (UID: "e7353446-2b8c-48fc-8267-42b6f3ac0502"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.296312 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "8cd0df95-6511-4220-8bdd-53ed795a0606" (UID: "8cd0df95-6511-4220-8bdd-53ed795a0606"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.297968 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg" (OuterVolumeSpecName: "kube-api-access-q2kwg") pod "e7353446-2b8c-48fc-8267-42b6f3ac0502" (UID: "e7353446-2b8c-48fc-8267-42b6f3ac0502"). InnerVolumeSpecName "kube-api-access-q2kwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.298130 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk" (OuterVolumeSpecName: "kube-api-access-v9pwk") pod "8cd0df95-6511-4220-8bdd-53ed795a0606" (UID: "8cd0df95-6511-4220-8bdd-53ed795a0606"). InnerVolumeSpecName "kube-api-access-v9pwk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.310819 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts" (OuterVolumeSpecName: "scripts") pod "8cd0df95-6511-4220-8bdd-53ed795a0606" (UID: "8cd0df95-6511-4220-8bdd-53ed795a0606"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.312612 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data" (OuterVolumeSpecName: "config-data") pod "e7353446-2b8c-48fc-8267-42b6f3ac0502" (UID: "e7353446-2b8c-48fc-8267-42b6f3ac0502"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.314391 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data" (OuterVolumeSpecName: "config-data") pod "8cd0df95-6511-4220-8bdd-53ed795a0606" (UID: "8cd0df95-6511-4220-8bdd-53ed795a0606"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.315770 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts" (OuterVolumeSpecName: "scripts") pod "e7353446-2b8c-48fc-8267-42b6f3ac0502" (UID: "e7353446-2b8c-48fc-8267-42b6f3ac0502"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393226 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8cd0df95-6511-4220-8bdd-53ed795a0606-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393261 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393271 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393281 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9pwk\" (UniqueName: \"kubernetes.io/projected/8cd0df95-6511-4220-8bdd-53ed795a0606-kube-api-access-v9pwk\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393293 4693 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/e7353446-2b8c-48fc-8267-42b6f3ac0502-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393303 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/e7353446-2b8c-48fc-8267-42b6f3ac0502-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393314 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q2kwg\" (UniqueName: 
\"kubernetes.io/projected/e7353446-2b8c-48fc-8267-42b6f3ac0502-kube-api-access-q2kwg\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393324 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8cd0df95-6511-4220-8bdd-53ed795a0606-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.393332 4693 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8cd0df95-6511-4220-8bdd-53ed795a0606-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.471010 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-6bb9684445-rmchz" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.471048 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerDied","Data":"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.471123 4693 scope.go:117] "RemoveContainer" containerID="03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.478618 4693 generic.go:334] "Generic (PLEG): container finished" podID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerID="03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.478644 4693 generic.go:334] "Generic (PLEG): container finished" podID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerID="bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.478709 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerDied","Data":"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.478733 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6bb9684445-rmchz" event={"ID":"6acf2b70-484b-4cae-97e0-2a999ef7dcef","Type":"ContainerDied","Data":"a49323af211301011c735148594b6765b684dbb29975a1f98d8478c50e5a138b"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482664 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerID="25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482690 4693 generic.go:334] "Generic (PLEG): container finished" podID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerID="f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482774 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-c9ccb65bf-fw9qk" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482802 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerDied","Data":"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482821 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerDied","Data":"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.482832 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-c9ccb65bf-fw9qk" event={"ID":"8cd0df95-6511-4220-8bdd-53ed795a0606","Type":"ContainerDied","Data":"1b0af9707d435c7cba80fc1f90c94ced33bba99a3725e4881f7ef0e9601a51bd"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485088 4693 generic.go:334] "Generic (PLEG): container finished" podID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerID="910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485109 4693 generic.go:334] "Generic (PLEG): container finished" podID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerID="19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" exitCode=137 Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485126 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerDied","Data":"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485141 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerDied","Data":"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485151 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-6c46c9cb9c-wcdw2" event={"ID":"e7353446-2b8c-48fc-8267-42b6f3ac0502","Type":"ContainerDied","Data":"bfeab9714cc74f87e851852771321b417347a39b0a93500c4f97e284c37d5dca"} Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.485413 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-6c46c9cb9c-wcdw2" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.499993 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6bb9684445-rmchz"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.510571 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6bb9684445-rmchz"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.531575 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.540455 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-6c46c9cb9c-wcdw2"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.546794 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.554013 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-c9ccb65bf-fw9qk"] Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.613069 4693 scope.go:117] "RemoveContainer" containerID="bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.626838 4693 scope.go:117] "RemoveContainer" containerID="03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" Nov 22 09:18:39 crc kubenswrapper[4693]: E1122 09:18:39.627128 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72\": container with ID starting with 03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72 not found: ID does not exist" containerID="03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627160 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72"} err="failed to get container status \"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72\": rpc error: code = NotFound desc = could not find container \"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72\": container with ID starting with 03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627182 4693 scope.go:117] "RemoveContainer" containerID="bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" Nov 22 09:18:39 crc kubenswrapper[4693]: E1122 09:18:39.627425 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040\": container with ID starting with bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040 not found: ID does not exist" containerID="bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627447 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040"} err="failed to get container status \"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040\": rpc error: code = NotFound desc = could not find container \"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040\": 
container with ID starting with bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627463 4693 scope.go:117] "RemoveContainer" containerID="03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627633 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72"} err="failed to get container status \"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72\": rpc error: code = NotFound desc = could not find container \"03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72\": container with ID starting with 03d0820b75dfbb0cb7c871ddb8c3af36e531423933b31b77c3cb9ce54d3acc72 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627645 4693 scope.go:117] "RemoveContainer" containerID="bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627839 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040"} err="failed to get container status \"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040\": rpc error: code = NotFound desc = could not find container \"bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040\": container with ID starting with bcfe68eaed2980bda286f13504f66366e1dc0474cb1023eb3498369270733040 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.627866 4693 scope.go:117] "RemoveContainer" containerID="25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.838334 4693 scope.go:117] "RemoveContainer" containerID="f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.861741 4693 scope.go:117] "RemoveContainer" containerID="25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" Nov 22 09:18:39 crc kubenswrapper[4693]: E1122 09:18:39.862201 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7\": container with ID starting with 25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7 not found: ID does not exist" containerID="25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862245 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7"} err="failed to get container status \"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7\": rpc error: code = NotFound desc = could not find container \"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7\": container with ID starting with 25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862274 4693 scope.go:117] "RemoveContainer" containerID="f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" Nov 22 09:18:39 crc kubenswrapper[4693]: E1122 09:18:39.862581 4693 log.go:32] "ContainerStatus from runtime 
service failed" err="rpc error: code = NotFound desc = could not find container \"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2\": container with ID starting with f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2 not found: ID does not exist" containerID="f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862613 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2"} err="failed to get container status \"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2\": rpc error: code = NotFound desc = could not find container \"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2\": container with ID starting with f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862633 4693 scope.go:117] "RemoveContainer" containerID="25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862880 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7"} err="failed to get container status \"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7\": rpc error: code = NotFound desc = could not find container \"25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7\": container with ID starting with 25ad90f7c7cae434257300d5220bf9f1fee468a27faf4ddce6307f23bb4223a7 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.862899 4693 scope.go:117] "RemoveContainer" containerID="f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.863225 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2"} err="failed to get container status \"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2\": rpc error: code = NotFound desc = could not find container \"f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2\": container with ID starting with f2e80146a0ee3360b985fc46137c6111d0e2e7e3e8efb6aedb6e6c95635097b2 not found: ID does not exist" Nov 22 09:18:39 crc kubenswrapper[4693]: I1122 09:18:39.863274 4693 scope.go:117] "RemoveContainer" containerID="910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.019643 4693 scope.go:117] "RemoveContainer" containerID="19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.039760 4693 scope.go:117] "RemoveContainer" containerID="910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" Nov 22 09:18:40 crc kubenswrapper[4693]: E1122 09:18:40.040289 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab\": container with ID starting with 910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab not found: ID does not exist" containerID="910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 
09:18:40.040343 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab"} err="failed to get container status \"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab\": rpc error: code = NotFound desc = could not find container \"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab\": container with ID starting with 910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab not found: ID does not exist" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.040378 4693 scope.go:117] "RemoveContainer" containerID="19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" Nov 22 09:18:40 crc kubenswrapper[4693]: E1122 09:18:40.040953 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc\": container with ID starting with 19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc not found: ID does not exist" containerID="19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.040989 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc"} err="failed to get container status \"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc\": rpc error: code = NotFound desc = could not find container \"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc\": container with ID starting with 19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc not found: ID does not exist" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.041009 4693 scope.go:117] "RemoveContainer" containerID="910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.041306 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab"} err="failed to get container status \"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab\": rpc error: code = NotFound desc = could not find container \"910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab\": container with ID starting with 910fe85db81517758b6631056a545815646590a9066a0f2942cdf44fafa9beab not found: ID does not exist" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.041329 4693 scope.go:117] "RemoveContainer" containerID="19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.041651 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc"} err="failed to get container status \"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc\": rpc error: code = NotFound desc = could not find container \"19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc\": container with ID starting with 19777d7cd688f44b4e393a5964f26c3787eeff79386146213ca98ebb3d3f59fc not found: ID does not exist" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.156743 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" 
path="/var/lib/kubelet/pods/6acf2b70-484b-4cae-97e0-2a999ef7dcef/volumes" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.157433 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" path="/var/lib/kubelet/pods/8cd0df95-6511-4220-8bdd-53ed795a0606/volumes" Nov 22 09:18:40 crc kubenswrapper[4693]: I1122 09:18:40.158287 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" path="/var/lib/kubelet/pods/e7353446-2b8c-48fc-8267-42b6f3ac0502/volumes" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.037076 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.086253 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.086594 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="dnsmasq-dns" containerID="cri-o://8078cfe99d3fc165ed1069c6f1a6387c434c9b60a901d9e2d0800abe092f7983" gracePeriod=10 Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.317008 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.400526 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.507587 4693 generic.go:334] "Generic (PLEG): container finished" podID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerID="fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30" exitCode=0 Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.507655 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerDied","Data":"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"} Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.508859 4693 generic.go:334] "Generic (PLEG): container finished" podID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerID="8078cfe99d3fc165ed1069c6f1a6387c434c9b60a901d9e2d0800abe092f7983" exitCode=0 Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.509053 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="cinder-scheduler" containerID="cri-o://38e1d3bc7bc5a77353238d3c6c503c80f529dc66c1dfc7aec4b59ba2c6960e37" gracePeriod=30 Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.509369 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerDied","Data":"8078cfe99d3fc165ed1069c6f1a6387c434c9b60a901d9e2d0800abe092f7983"} Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.509615 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="probe" containerID="cri-o://99fcc5431e24b22d409844bd2d38a9737cb2b98ac5503ccc95ebe32368a1f934" gracePeriod=30 Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.650921 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.745602 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.745654 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.745910 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.745990 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.746151 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9cdjn\" (UniqueName: \"kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.746264 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc\") pod \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\" (UID: \"cfcddea0-5e6c-4ae4-b8af-d4438dc61861\") " Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.754877 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn" (OuterVolumeSpecName: "kube-api-access-9cdjn") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "kube-api-access-9cdjn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.804995 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.805603 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.807889 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.813141 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.836635 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config" (OuterVolumeSpecName: "config") pod "cfcddea0-5e6c-4ae4-b8af-d4438dc61861" (UID: "cfcddea0-5e6c-4ae4-b8af-d4438dc61861"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848628 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848655 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848670 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848696 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848705 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:41 crc kubenswrapper[4693]: I1122 09:18:41.848715 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9cdjn\" (UniqueName: \"kubernetes.io/projected/cfcddea0-5e6c-4ae4-b8af-d4438dc61861-kube-api-access-9cdjn\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.031217 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.052198 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs\") pod \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.052359 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config\") pod \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.052472 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle\") pod \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.052510 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config\") pod \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.052570 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tz42n\" (UniqueName: \"kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n\") pod \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\" (UID: \"6b70ae24-cb73-4a6b-8c10-6567f24991fe\") " Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.056107 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n" (OuterVolumeSpecName: "kube-api-access-tz42n") pod "6b70ae24-cb73-4a6b-8c10-6567f24991fe" (UID: "6b70ae24-cb73-4a6b-8c10-6567f24991fe"). InnerVolumeSpecName "kube-api-access-tz42n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.066560 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "6b70ae24-cb73-4a6b-8c10-6567f24991fe" (UID: "6b70ae24-cb73-4a6b-8c10-6567f24991fe"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.100351 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b70ae24-cb73-4a6b-8c10-6567f24991fe" (UID: "6b70ae24-cb73-4a6b-8c10-6567f24991fe"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.107304 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config" (OuterVolumeSpecName: "config") pod "6b70ae24-cb73-4a6b-8c10-6567f24991fe" (UID: "6b70ae24-cb73-4a6b-8c10-6567f24991fe"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.118573 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "6b70ae24-cb73-4a6b-8c10-6567f24991fe" (UID: "6b70ae24-cb73-4a6b-8c10-6567f24991fe"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.154958 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.154987 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.154998 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tz42n\" (UniqueName: \"kubernetes.io/projected/6b70ae24-cb73-4a6b-8c10-6567f24991fe-kube-api-access-tz42n\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.155009 4693 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.155019 4693 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/6b70ae24-cb73-4a6b-8c10-6567f24991fe-httpd-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.316826 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-9f6899d66-t5sml" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.516256 4693 generic.go:334] "Generic (PLEG): container finished" podID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerID="a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8" exitCode=0 Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.516311 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-5f8cbc66c6-d5d77" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.516329 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerDied","Data":"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8"} Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.516649 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-5f8cbc66c6-d5d77" event={"ID":"6b70ae24-cb73-4a6b-8c10-6567f24991fe","Type":"ContainerDied","Data":"90f87db59d959deff7738a8fda51d85a99c03bb0113efc8c7f07706af26ca6f0"} Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.516665 4693 scope.go:117] "RemoveContainer" containerID="8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.521112 4693 generic.go:334] "Generic (PLEG): container finished" podID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerID="99fcc5431e24b22d409844bd2d38a9737cb2b98ac5503ccc95ebe32368a1f934" exitCode=0 Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.521167 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerDied","Data":"99fcc5431e24b22d409844bd2d38a9737cb2b98ac5503ccc95ebe32368a1f934"} Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.522899 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" event={"ID":"cfcddea0-5e6c-4ae4-b8af-d4438dc61861","Type":"ContainerDied","Data":"60cc3c35ca133782a1e2c533724236efbe3f75a2d3b576b0d04ffdc0ac04519d"} Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.523006 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-66b66f7449-d4tj4" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.540657 4693 scope.go:117] "RemoveContainer" containerID="a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.542892 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"] Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.549195 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-5f8cbc66c6-d5d77"] Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.556391 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.558570 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66b66f7449-d4tj4"] Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.580923 4693 scope.go:117] "RemoveContainer" containerID="8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad" Nov 22 09:18:42 crc kubenswrapper[4693]: E1122 09:18:42.581380 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad\": container with ID starting with 8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad not found: ID does not exist" containerID="8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.581449 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad"} err="failed to get container status \"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad\": rpc error: code = NotFound desc = could not find container \"8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad\": container with ID starting with 8e17957641e4a584affa451f4619c8d7a92057b9729e29828bbb9bb1b36e06ad not found: ID does not exist" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.581482 4693 scope.go:117] "RemoveContainer" containerID="a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8" Nov 22 09:18:42 crc kubenswrapper[4693]: E1122 09:18:42.581902 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8\": container with ID starting with a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8 not found: ID does not exist" containerID="a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.581931 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8"} err="failed to get container status \"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8\": rpc error: code = NotFound desc = could not find container \"a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8\": container with ID starting with a73b039f5c0494a763d236791a8cc28733a750ffe5de41a70765470ec2387cd8 not found: ID does not exist" Nov 22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.581950 4693 scope.go:117] "RemoveContainer" containerID="8078cfe99d3fc165ed1069c6f1a6387c434c9b60a901d9e2d0800abe092f7983" Nov 
22 09:18:42 crc kubenswrapper[4693]: I1122 09:18:42.598449 4693 scope.go:117] "RemoveContainer" containerID="3facd3da1e0cab358a7feb42dbe7a5ac98eb880da1116a99eade23eba7a9d2d0" Nov 22 09:18:43 crc kubenswrapper[4693]: I1122 09:18:43.694434 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.155203 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" path="/var/lib/kubelet/pods/6b70ae24-cb73-4a6b-8c10-6567f24991fe/volumes" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.155873 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" path="/var/lib/kubelet/pods/cfcddea0-5e6c-4ae4-b8af-d4438dc61861/volumes" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.543182 4693 generic.go:334] "Generic (PLEG): container finished" podID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerID="38e1d3bc7bc5a77353238d3c6c503c80f529dc66c1dfc7aec4b59ba2c6960e37" exitCode=0 Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.543478 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerDied","Data":"38e1d3bc7bc5a77353238d3c6c503c80f529dc66c1dfc7aec4b59ba2c6960e37"} Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.543511 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"916b3e9c-271f-49e1-8c84-b41fec6b7cca","Type":"ContainerDied","Data":"1a9c590ec0a5e8df582c303591fe3adb2b0a947f9e943df60f39c2685c1a9b88"} Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.543523 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1a9c590ec0a5e8df582c303591fe3adb2b0a947f9e943df60f39c2685c1a9b88" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.558867 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608153 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608227 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608379 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608541 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zml9f\" (UniqueName: \"kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608686 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.608881 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom\") pod \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\" (UID: \"916b3e9c-271f-49e1-8c84-b41fec6b7cca\") " Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.632972 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.641278 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.641762 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f" (OuterVolumeSpecName: "kube-api-access-zml9f") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "kube-api-access-zml9f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.666961 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts" (OuterVolumeSpecName: "scripts") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.717122 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.718015 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zml9f\" (UniqueName: \"kubernetes.io/projected/916b3e9c-271f-49e1-8c84-b41fec6b7cca-kube-api-access-zml9f\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.718042 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.718050 4693 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data-custom\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.718059 4693 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/916b3e9c-271f-49e1-8c84-b41fec6b7cca-etc-machine-id\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.718066 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.769052 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data" (OuterVolumeSpecName: "config-data") pod "916b3e9c-271f-49e1-8c84-b41fec6b7cca" (UID: "916b3e9c-271f-49e1-8c84-b41fec6b7cca"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:44 crc kubenswrapper[4693]: I1122 09:18:44.820108 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/916b3e9c-271f-49e1-8c84-b41fec6b7cca-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.551766 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.578699 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.584762 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.599953 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600290 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="cinder-scheduler" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600310 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="cinder-scheduler" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600319 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600324 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600331 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600336 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600349 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600354 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600362 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600367 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600375 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="dnsmasq-dns" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600380 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="dnsmasq-dns" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600386 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="probe" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600392 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="probe" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600406 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="init" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600411 4693 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="init" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600425 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-httpd" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600430 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-httpd" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600439 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600444 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600457 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600463 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: E1122 09:18:45.600471 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-api" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600476 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-api" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600613 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600628 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600637 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="cinder-scheduler" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600644 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-httpd" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600652 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" containerName="probe" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600659 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600669 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="cfcddea0-5e6c-4ae4-b8af-d4438dc61861" containerName="dnsmasq-dns" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600680 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6acf2b70-484b-4cae-97e0-2a999ef7dcef" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600686 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cd0df95-6511-4220-8bdd-53ed795a0606" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600693 4693 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="6b70ae24-cb73-4a6b-8c10-6567f24991fe" containerName="neutron-api" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.600702 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e7353446-2b8c-48fc-8267-42b6f3ac0502" containerName="horizon-log" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.602008 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.604649 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.609785 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.713770 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-5947459fbf-s5xqj" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738405 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5284f375-48e8-4caa-a5f9-fa762001ce69-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738461 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738499 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9fmf\" (UniqueName: \"kubernetes.io/projected/5284f375-48e8-4caa-a5f9-fa762001ce69-kube-api-access-q9fmf\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738540 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738565 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-scripts\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.738587 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840339 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9fmf\" (UniqueName: \"kubernetes.io/projected/5284f375-48e8-4caa-a5f9-fa762001ce69-kube-api-access-q9fmf\") pod 
\"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840409 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840443 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-scripts\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840474 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840566 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5284f375-48e8-4caa-a5f9-fa762001ce69-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.840610 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.841645 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/5284f375-48e8-4caa-a5f9-fa762001ce69-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.845359 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.845549 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.846605 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-scripts\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.847710 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/5284f375-48e8-4caa-a5f9-fa762001ce69-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.855874 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9fmf\" (UniqueName: \"kubernetes.io/projected/5284f375-48e8-4caa-a5f9-fa762001ce69-kube-api-access-q9fmf\") pod \"cinder-scheduler-0\" (UID: \"5284f375-48e8-4caa-a5f9-fa762001ce69\") " pod="openstack/cinder-scheduler-0" Nov 22 09:18:45 crc kubenswrapper[4693]: I1122 09:18:45.916505 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Nov 22 09:18:46 crc kubenswrapper[4693]: I1122 09:18:46.156215 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="916b3e9c-271f-49e1-8c84-b41fec6b7cca" path="/var/lib/kubelet/pods/916b3e9c-271f-49e1-8c84-b41fec6b7cca/volumes" Nov 22 09:18:46 crc kubenswrapper[4693]: I1122 09:18:46.315698 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Nov 22 09:18:46 crc kubenswrapper[4693]: I1122 09:18:46.565018 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5284f375-48e8-4caa-a5f9-fa762001ce69","Type":"ContainerStarted","Data":"4abb65d5e3c8bbc066881f9c4dbe2ea3514109a12aa6521bb9d343fdde7a8f5d"} Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.387613 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.606912 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5284f375-48e8-4caa-a5f9-fa762001ce69","Type":"ContainerStarted","Data":"683eacb1ec06f72d3e71a0f25302075172c480d8b05c0fa480b5943e68a3faf9"} Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.606971 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"5284f375-48e8-4caa-a5f9-fa762001ce69","Type":"ContainerStarted","Data":"e22b925d8357efd4567391085acc99ebff4286900011d2e006ad54a53b9ef191"} Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.642861 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=2.642827741 podStartE2EDuration="2.642827741s" podCreationTimestamp="2025-11-22 09:18:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:47.637329901 +0000 UTC m=+923.779832192" watchObservedRunningTime="2025-11-22 09:18:47.642827741 +0000 UTC m=+923.785330032" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.907109 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.908533 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.912114 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-k8spz" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.912140 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.912355 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.912501 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.990354 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvmwj\" (UniqueName: \"kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.990435 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.990486 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:47 crc kubenswrapper[4693]: I1122 09:18:47.990532 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.091980 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.092226 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.092362 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.092448 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-zvmwj\" (UniqueName: \"kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.093747 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.097159 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.102339 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.110423 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zvmwj\" (UniqueName: \"kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj\") pod \"openstackclient\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.212270 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.213093 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.219557 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.241830 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.244048 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.270964 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.296940 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k5q24\" (UniqueName: \"kubernetes.io/projected/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-kube-api-access-k5q24\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.297072 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.297170 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.297333 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: E1122 09:18:48.360425 4693 log.go:32] "RunPodSandbox from runtime service failed" err=< Nov 22 09:18:48 crc kubenswrapper[4693]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_814898a5-e617-4f53-b670-7c02aae9f5d6_0(48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01" Netns:"/var/run/netns/c035568a-8566-4e5f-bca3-ef6617f24d5c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01;K8S_POD_UID=814898a5-e617-4f53-b670-7c02aae9f5d6" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/814898a5-e617-4f53-b670-7c02aae9f5d6]: expected pod UID "814898a5-e617-4f53-b670-7c02aae9f5d6" but got "6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d" from Kube API Nov 22 09:18:48 crc kubenswrapper[4693]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 22 09:18:48 crc kubenswrapper[4693]: > Nov 22 09:18:48 crc kubenswrapper[4693]: E1122 09:18:48.360763 4693 kuberuntime_sandbox.go:72] 
"Failed to create sandbox for pod" err=< Nov 22 09:18:48 crc kubenswrapper[4693]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_openstackclient_openstack_814898a5-e617-4f53-b670-7c02aae9f5d6_0(48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01): error adding pod openstack_openstackclient to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01" Netns:"/var/run/netns/c035568a-8566-4e5f-bca3-ef6617f24d5c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openstack;K8S_POD_NAME=openstackclient;K8S_POD_INFRA_CONTAINER_ID=48fdd9d78eba654de73b35551cba074cef2455ff33ead2ebdcaff34bd55e8a01;K8S_POD_UID=814898a5-e617-4f53-b670-7c02aae9f5d6" Path:"" ERRORED: error configuring pod [openstack/openstackclient] networking: Multus: [openstack/openstackclient/814898a5-e617-4f53-b670-7c02aae9f5d6]: expected pod UID "814898a5-e617-4f53-b670-7c02aae9f5d6" but got "6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d" from Kube API Nov 22 09:18:48 crc kubenswrapper[4693]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Nov 22 09:18:48 crc kubenswrapper[4693]: > pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.399692 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.399787 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k5q24\" (UniqueName: \"kubernetes.io/projected/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-kube-api-access-k5q24\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.399895 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.399974 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.401621 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.405115 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.407460 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-openstack-config-secret\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.421044 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k5q24\" (UniqueName: \"kubernetes.io/projected/6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d-kube-api-access-k5q24\") pod \"openstackclient\" (UID: \"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d\") " pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.610308 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.614016 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.616640 4693 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="814898a5-e617-4f53-b670-7c02aae9f5d6" podUID="6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.633938 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.704716 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret\") pod \"814898a5-e617-4f53-b670-7c02aae9f5d6\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.704776 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config\") pod \"814898a5-e617-4f53-b670-7c02aae9f5d6\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.704840 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle\") pod \"814898a5-e617-4f53-b670-7c02aae9f5d6\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.704896 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvmwj\" (UniqueName: \"kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj\") pod \"814898a5-e617-4f53-b670-7c02aae9f5d6\" (UID: \"814898a5-e617-4f53-b670-7c02aae9f5d6\") " Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.705264 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config" (OuterVolumeSpecName: "openstack-config") pod 
"814898a5-e617-4f53-b670-7c02aae9f5d6" (UID: "814898a5-e617-4f53-b670-7c02aae9f5d6"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.710974 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "814898a5-e617-4f53-b670-7c02aae9f5d6" (UID: "814898a5-e617-4f53-b670-7c02aae9f5d6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.710991 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj" (OuterVolumeSpecName: "kube-api-access-zvmwj") pod "814898a5-e617-4f53-b670-7c02aae9f5d6" (UID: "814898a5-e617-4f53-b670-7c02aae9f5d6"). InnerVolumeSpecName "kube-api-access-zvmwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.711035 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "814898a5-e617-4f53-b670-7c02aae9f5d6" (UID: "814898a5-e617-4f53-b670-7c02aae9f5d6"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.807309 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.807347 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/814898a5-e617-4f53-b670-7c02aae9f5d6-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.807358 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/814898a5-e617-4f53-b670-7c02aae9f5d6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:48 crc kubenswrapper[4693]: I1122 09:18:48.807367 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvmwj\" (UniqueName: \"kubernetes.io/projected/814898a5-e617-4f53-b670-7c02aae9f5d6-kube-api-access-zvmwj\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:49 crc kubenswrapper[4693]: I1122 09:18:49.005418 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Nov 22 09:18:49 crc kubenswrapper[4693]: I1122 09:18:49.623314 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Nov 22 09:18:49 crc kubenswrapper[4693]: I1122 09:18:49.623337 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d","Type":"ContainerStarted","Data":"f984281e4cbc53eeab3c8fadccaf78965c589967f353eda92913d21e9604f75c"} Nov 22 09:18:49 crc kubenswrapper[4693]: I1122 09:18:49.626226 4693 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openstack/openstackclient" oldPodUID="814898a5-e617-4f53-b670-7c02aae9f5d6" podUID="6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d" Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.187205 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="814898a5-e617-4f53-b670-7c02aae9f5d6" path="/var/lib/kubelet/pods/814898a5-e617-4f53-b670-7c02aae9f5d6/volumes" Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.894602 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.895221 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-central-agent" containerID="cri-o://da291a940212e51760a6510d87012fd7883b06ac24252cef062ed4da78eec894" gracePeriod=30 Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.895390 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="proxy-httpd" containerID="cri-o://f21b31973fff4ae5e32f576c352f67ac8875ad86f1f3234a5b38a8d5eb4443bf" gracePeriod=30 Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.895461 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-notification-agent" containerID="cri-o://c65da8152721e6d62dd7a5a2d4ccb755bd75ce02af2c4e5f93c220c8fb5e6c14" gracePeriod=30 Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.895528 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="sg-core" containerID="cri-o://9af482365a16ff5995fc63fc8a8ccf084fb07bcdb6ca237f7923d5b95b8eaac7" gracePeriod=30 Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.899460 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 09:18:50 crc kubenswrapper[4693]: I1122 09:18:50.916785 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.164106 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-795d6c4bbf-rtw6h"] Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.165614 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.167010 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.167292 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.168092 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.176349 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-795d6c4bbf-rtw6h"] Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.252544 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-internal-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.252743 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-etc-swift\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.252785 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-config-data\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.253010 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-log-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.253174 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-run-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.253259 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-public-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.253302 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znfkq\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-kube-api-access-znfkq\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " 
pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.253332 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-combined-ca-bundle\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.355770 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-log-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.355853 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-run-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.355894 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-public-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.355915 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znfkq\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-kube-api-access-znfkq\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.355939 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-combined-ca-bundle\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.356001 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-internal-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.356027 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-config-data\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.356040 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-etc-swift\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " 
pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.356275 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-log-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.356344 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/51c40064-56a3-4186-bf60-6181686b256d-run-httpd\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.361824 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-etc-swift\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.363612 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-combined-ca-bundle\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.364041 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-config-data\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.366355 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-internal-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.369656 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znfkq\" (UniqueName: \"kubernetes.io/projected/51c40064-56a3-4186-bf60-6181686b256d-kube-api-access-znfkq\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.371473 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/51c40064-56a3-4186-bf60-6181686b256d-public-tls-certs\") pod \"swift-proxy-795d6c4bbf-rtw6h\" (UID: \"51c40064-56a3-4186-bf60-6181686b256d\") " pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.491145 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.645638 4693 generic.go:334] "Generic (PLEG): container finished" podID="4185887a-3a26-4abc-ab25-53033cc7e940" containerID="f21b31973fff4ae5e32f576c352f67ac8875ad86f1f3234a5b38a8d5eb4443bf" exitCode=0 Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.645945 4693 generic.go:334] "Generic (PLEG): container finished" podID="4185887a-3a26-4abc-ab25-53033cc7e940" containerID="9af482365a16ff5995fc63fc8a8ccf084fb07bcdb6ca237f7923d5b95b8eaac7" exitCode=2 Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.645960 4693 generic.go:334] "Generic (PLEG): container finished" podID="4185887a-3a26-4abc-ab25-53033cc7e940" containerID="da291a940212e51760a6510d87012fd7883b06ac24252cef062ed4da78eec894" exitCode=0 Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.645712 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerDied","Data":"f21b31973fff4ae5e32f576c352f67ac8875ad86f1f3234a5b38a8d5eb4443bf"} Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.646006 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerDied","Data":"9af482365a16ff5995fc63fc8a8ccf084fb07bcdb6ca237f7923d5b95b8eaac7"} Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.646022 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerDied","Data":"da291a940212e51760a6510d87012fd7883b06ac24252cef062ed4da78eec894"} Nov 22 09:18:51 crc kubenswrapper[4693]: I1122 09:18:51.944088 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-795d6c4bbf-rtw6h"] Nov 22 09:18:51 crc kubenswrapper[4693]: W1122 09:18:51.949992 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod51c40064_56a3_4186_bf60_6181686b256d.slice/crio-5bb97ff89ef0d807f7daa35634fba92b27f2f25afa6963bf758bae9f95e57ca0 WatchSource:0}: Error finding container 5bb97ff89ef0d807f7daa35634fba92b27f2f25afa6963bf758bae9f95e57ca0: Status 404 returned error can't find the container with id 5bb97ff89ef0d807f7daa35634fba92b27f2f25afa6963bf758bae9f95e57ca0 Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.457123 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.457816 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-log" containerID="cri-o://cc046355824e69ccf74334f7538525fac3291381fab0182ebe08371f6df761e4" gracePeriod=30 Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.457943 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-httpd" containerID="cri-o://aba38c12bf23ffadbc2f82662a7635f10a4c8679fec81c7deb2a47eb66442cbc" gracePeriod=30 Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.665087 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" 
event={"ID":"51c40064-56a3-4186-bf60-6181686b256d","Type":"ContainerStarted","Data":"4d426320053619e4f95c3d6fb070782180348fbfd5bd375329043a48d6e7ce2b"} Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.665129 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" event={"ID":"51c40064-56a3-4186-bf60-6181686b256d","Type":"ContainerStarted","Data":"355014621dae5f1889f72b469efe6ec1a4c95ebc0cc9bbb2776ad5fc2e5293ac"} Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.665144 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" event={"ID":"51c40064-56a3-4186-bf60-6181686b256d","Type":"ContainerStarted","Data":"5bb97ff89ef0d807f7daa35634fba92b27f2f25afa6963bf758bae9f95e57ca0"} Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.665989 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.666072 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.672909 4693 generic.go:334] "Generic (PLEG): container finished" podID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerID="cc046355824e69ccf74334f7538525fac3291381fab0182ebe08371f6df761e4" exitCode=143 Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.673195 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerDied","Data":"cc046355824e69ccf74334f7538525fac3291381fab0182ebe08371f6df761e4"} Nov 22 09:18:52 crc kubenswrapper[4693]: I1122 09:18:52.684772 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" podStartSLOduration=1.684761105 podStartE2EDuration="1.684761105s" podCreationTimestamp="2025-11-22 09:18:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:18:52.682732509 +0000 UTC m=+928.825234800" watchObservedRunningTime="2025-11-22 09:18:52.684761105 +0000 UTC m=+928.827263385" Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.127963 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.128212 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-httpd" containerID="cri-o://94e8580de619ea53071d021e883694ef8a97f2451521f94e5370f31688423352" gracePeriod=30 Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.128167 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-log" containerID="cri-o://dc8bee7fa151c8e85e188e167b44e73d8073a1fc91f60e2f436bf44e61f7ba2a" gracePeriod=30 Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.692991 4693 generic.go:334] "Generic (PLEG): container finished" podID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerID="dc8bee7fa151c8e85e188e167b44e73d8073a1fc91f60e2f436bf44e61f7ba2a" exitCode=143 Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.693108 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerDied","Data":"dc8bee7fa151c8e85e188e167b44e73d8073a1fc91f60e2f436bf44e61f7ba2a"} Nov 22 09:18:53 crc kubenswrapper[4693]: I1122 09:18:53.694382 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.443322 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-hcwm4"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.444799 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.451769 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hcwm4"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.542523 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gsc2f\" (UniqueName: \"kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.542887 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.552033 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-2bbd-account-create-phtjw"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.553332 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.554964 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.560420 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-lm5mh"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.561655 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.584047 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2bbd-account-create-phtjw"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.595827 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lm5mh"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.645497 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pb85p\" (UniqueName: \"kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.645570 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gsc2f\" (UniqueName: \"kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.645869 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9dq5d\" (UniqueName: \"kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.645929 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.646040 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.646121 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.646675 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.658076 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-269rq"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.660215 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.670919 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gsc2f\" (UniqueName: \"kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f\") pod \"nova-api-db-create-hcwm4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") " pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.674440 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-269rq"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.731485 4693 generic.go:334] "Generic (PLEG): container finished" podID="4185887a-3a26-4abc-ab25-53033cc7e940" containerID="c65da8152721e6d62dd7a5a2d4ccb755bd75ce02af2c4e5f93c220c8fb5e6c14" exitCode=0 Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.731555 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerDied","Data":"c65da8152721e6d62dd7a5a2d4ccb755bd75ce02af2c4e5f93c220c8fb5e6c14"} Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.735532 4693 generic.go:334] "Generic (PLEG): container finished" podID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerID="aba38c12bf23ffadbc2f82662a7635f10a4c8679fec81c7deb2a47eb66442cbc" exitCode=0 Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.735561 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerDied","Data":"aba38c12bf23ffadbc2f82662a7635f10a4c8679fec81c7deb2a47eb66442cbc"} Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.748649 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9dq5d\" (UniqueName: \"kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.749365 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.749535 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp6xq\" (UniqueName: \"kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.749574 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.749648 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" 
(UniqueName: \"kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.750679 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pb85p\" (UniqueName: \"kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.751421 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.751445 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.765383 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-dc3e-account-create-tplp4"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.766270 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9dq5d\" (UniqueName: \"kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d\") pod \"nova-api-2bbd-account-create-phtjw\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.766549 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pb85p\" (UniqueName: \"kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p\") pod \"nova-cell0-db-create-lm5mh\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") " pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.766919 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.767371 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-hcwm4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.775371 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.787908 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-dc3e-account-create-tplp4"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.852605 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.852655 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.852689 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hzrwc\" (UniqueName: \"kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.852827 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp6xq\" (UniqueName: \"kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.853697 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.870181 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.875568 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp6xq\" (UniqueName: \"kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq\") pod \"nova-cell1-db-create-269rq\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") " pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.884901 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-lm5mh" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.955401 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.955834 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hzrwc\" (UniqueName: \"kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.956359 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.962169 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-a227-account-create-zx7zl"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.963410 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.965407 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.983323 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a227-account-create-zx7zl"] Nov 22 09:18:55 crc kubenswrapper[4693]: I1122 09:18:55.985423 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hzrwc\" (UniqueName: \"kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc\") pod \"nova-cell0-dc3e-account-create-tplp4\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") " pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.047264 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-269rq" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.057731 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.057774 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kj92\" (UniqueName: \"kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.119974 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.162582 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.162666 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kj92\" (UniqueName: \"kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.166020 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.168758 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-dc3e-account-create-tplp4" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.179078 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kj92\" (UniqueName: \"kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92\") pod \"nova-cell1-a227-account-create-zx7zl\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") " pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.325199 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a227-account-create-zx7zl" Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.745968 4693 generic.go:334] "Generic (PLEG): container finished" podID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerID="94e8580de619ea53071d021e883694ef8a97f2451521f94e5370f31688423352" exitCode=0 Nov 22 09:18:56 crc kubenswrapper[4693]: I1122 09:18:56.746037 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerDied","Data":"94e8580de619ea53071d021e883694ef8a97f2451521f94e5370f31688423352"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.051397 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.127721 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128104 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128143 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128196 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128326 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128368 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128403 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.128421 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnfrs\" (UniqueName: \"kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs\") pod \"4185887a-3a26-4abc-ab25-53033cc7e940\" (UID: \"4185887a-3a26-4abc-ab25-53033cc7e940\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.129267 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.129619 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.132018 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts" (OuterVolumeSpecName: "scripts") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.132682 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs" (OuterVolumeSpecName: "kube-api-access-mnfrs") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "kube-api-access-mnfrs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.160968 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.219414 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.232524 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.233332 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.233356 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4185887a-3a26-4abc-ab25-53033cc7e940-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.233365 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.233373 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.233383 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnfrs\" (UniqueName: \"kubernetes.io/projected/4185887a-3a26-4abc-ab25-53033cc7e940-kube-api-access-mnfrs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.239058 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data" (OuterVolumeSpecName: "config-data") pod "4185887a-3a26-4abc-ab25-53033cc7e940" (UID: "4185887a-3a26-4abc-ab25-53033cc7e940"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.337227 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.337765 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338026 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338354 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338665 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338142 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs" (OuterVolumeSpecName: "logs") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338737 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338758 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.338879 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7bdml\" (UniqueName: \"kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml\") pod \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\" (UID: \"e33acce5-e1e0-4dd7-96d3-620949d2e6ed\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.339131 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.339713 4693 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.339726 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.339736 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4185887a-3a26-4abc-ab25-53033cc7e940-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.342464 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "glance") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.343358 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts" (OuterVolumeSpecName: "scripts") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.343854 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml" (OuterVolumeSpecName: "kube-api-access-7bdml") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "kube-api-access-7bdml". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.348151 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.360831 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.395878 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data" (OuterVolumeSpecName: "config-data") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.397205 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "e33acce5-e1e0-4dd7-96d3-620949d2e6ed" (UID: "e33acce5-e1e0-4dd7-96d3-620949d2e6ed"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440300 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440354 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440434 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq46t\" (UniqueName: \"kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440460 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440538 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440583 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts\") pod 
\"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440644 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.440666 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs\") pod \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\" (UID: \"648ffab2-f448-4bb1-a53f-1178c8dd8c32\") " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441078 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441095 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7bdml\" (UniqueName: \"kubernetes.io/projected/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-kube-api-access-7bdml\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441106 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441109 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441124 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441133 4693 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441142 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e33acce5-e1e0-4dd7-96d3-620949d2e6ed-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.441347 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs" (OuterVolumeSpecName: "logs") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.443935 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts" (OuterVolumeSpecName: "scripts") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). 
InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.447344 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "glance") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.447379 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t" (OuterVolumeSpecName: "kube-api-access-tq46t") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "kube-api-access-tq46t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.463087 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.465282 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.475606 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.484177 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data" (OuterVolumeSpecName: "config-data") pod "648ffab2-f448-4bb1-a53f-1178c8dd8c32" (UID: "648ffab2-f448-4bb1-a53f-1178c8dd8c32"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543211 4693 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-httpd-run\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543235 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543250 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543258 4693 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543267 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543275 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/648ffab2-f448-4bb1-a53f-1178c8dd8c32-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543304 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543312 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq46t\" (UniqueName: \"kubernetes.io/projected/648ffab2-f448-4bb1-a53f-1178c8dd8c32-kube-api-access-tq46t\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.543321 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/648ffab2-f448-4bb1-a53f-1178c8dd8c32-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.571655 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.645290 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.721827 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-hcwm4"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.728101 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-dc3e-account-create-tplp4"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.733577 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-lm5mh"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.738355 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-a227-account-create-zx7zl"] Nov 22 09:18:59 crc 
kubenswrapper[4693]: W1122 09:18:59.740014 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0e7e12bd_2fc6_445e_96bd_092859596ae4.slice/crio-e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301 WatchSource:0}: Error finding container e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301: Status 404 returned error can't find the container with id e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301 Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.743361 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-2bbd-account-create-phtjw"] Nov 22 09:18:59 crc kubenswrapper[4693]: W1122 09:18:59.749984 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc6f93f55_4ccc_49d4_b377_5fe5c859dd38.slice/crio-be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde WatchSource:0}: Error finding container be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde: Status 404 returned error can't find the container with id be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.750675 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-269rq"] Nov 22 09:18:59 crc kubenswrapper[4693]: W1122 09:18:59.752398 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda9753c17_d077_44bf_a381_e3dd7e4aa505.slice/crio-27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963 WatchSource:0}: Error finding container 27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963: Status 404 returned error can't find the container with id 27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963 Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.777097 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a227-account-create-zx7zl" event={"ID":"c6f93f55-4ccc-49d4-b377-5fe5c859dd38","Type":"ContainerStarted","Data":"be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.781531 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lm5mh" event={"ID":"81fa2494-ad98-4781-8a71-e518b21b6509","Type":"ContainerStarted","Data":"b79e8784a84e2559e03f051da363a20d93ef54567d33466d4d5e37a3b129a1c9"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.785379 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-dc3e-account-create-tplp4" event={"ID":"eeb9054c-f2a0-4e1a-b95f-210fca359716","Type":"ContainerStarted","Data":"7e73315a13cc934060498acea5a4581e62f21229299ebfc0fb24d2f1beedf806"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.788063 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.788295 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"648ffab2-f448-4bb1-a53f-1178c8dd8c32","Type":"ContainerDied","Data":"b4a6bef1dfa5225bb1371161dcdb1e46af9c79f3deae47547bad2afc7c9fa395"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.788334 4693 scope.go:117] "RemoveContainer" containerID="aba38c12bf23ffadbc2f82662a7635f10a4c8679fec81c7deb2a47eb66442cbc" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.789403 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2bbd-account-create-phtjw" event={"ID":"a9753c17-d077-44bf-a381-e3dd7e4aa505","Type":"ContainerStarted","Data":"27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.793059 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-269rq" event={"ID":"b6f9b111-6452-4462-9e5e-fe38c86e823b","Type":"ContainerStarted","Data":"4e1e7c87ec618e6e2fc90953c2cd082e342e0b92f23397586dbdceeda7a71263"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.794900 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hcwm4" event={"ID":"0e7e12bd-2fc6-445e-96bd-092859596ae4","Type":"ContainerStarted","Data":"e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.798033 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4185887a-3a26-4abc-ab25-53033cc7e940","Type":"ContainerDied","Data":"ff22b15971f3f675dd812e2f7f4ce2c43d946d27f53a5eae904e85e4ab6c19da"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.798590 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.808130 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"e33acce5-e1e0-4dd7-96d3-620949d2e6ed","Type":"ContainerDied","Data":"ad3eca5eb74db8e804c7fa8e4445cc36b609e8c81e54386fa69e7dac43169b73"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.808216 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.814607 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d","Type":"ContainerStarted","Data":"6a16054be906bde0d4d361e04894328370d0aaf081a20e98663db1b0aab517d5"} Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.843031 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.986396144 podStartE2EDuration="11.843010615s" podCreationTimestamp="2025-11-22 09:18:48 +0000 UTC" firstStartedPulling="2025-11-22 09:18:49.011412422 +0000 UTC m=+925.153914713" lastFinishedPulling="2025-11-22 09:18:58.868026893 +0000 UTC m=+935.010529184" observedRunningTime="2025-11-22 09:18:59.828762226 +0000 UTC m=+935.971264518" watchObservedRunningTime="2025-11-22 09:18:59.843010615 +0000 UTC m=+935.985512905" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.899132 4693 scope.go:117] "RemoveContainer" containerID="cc046355824e69ccf74334f7538525fac3291381fab0182ebe08371f6df761e4" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.966679 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.975156 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.981172 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.985453 4693 scope.go:117] "RemoveContainer" containerID="f21b31973fff4ae5e32f576c352f67ac8875ad86f1f3234a5b38a8d5eb4443bf" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.985628 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.993899 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994535 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994557 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994570 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-central-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994578 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-central-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994591 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="sg-core" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994597 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="sg-core" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994611 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-log" Nov 22 09:18:59 crc 
kubenswrapper[4693]: I1122 09:18:59.994617 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-log" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994632 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="proxy-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994637 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="proxy-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994656 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-notification-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994662 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-notification-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994678 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994685 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: E1122 09:18:59.994696 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-log" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994703 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-log" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994897 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="sg-core" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994908 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994918 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994932 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="proxy-httpd" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994945 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" containerName="glance-log" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994955 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" containerName="glance-log" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994965 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-notification-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.994974 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" containerName="ceilometer-central-agent" Nov 22 09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.996145 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 
09:18:59 crc kubenswrapper[4693]: I1122 09:18:59.996238 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.000411 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.001392 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.001781 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.002152 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.002427 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-cwf55" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.012234 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.014033 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.014453 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.018873 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.022638 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.027221 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.032517 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.036954 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.038575 4693 scope.go:117] "RemoveContainer" containerID="9af482365a16ff5995fc63fc8a8ccf084fb07bcdb6ca237f7923d5b95b8eaac7" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.039024 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.039219 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054605 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-scripts\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054650 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-config-data\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054681 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054705 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054733 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054748 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054766 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rnlpw\" (UniqueName: \"kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054812 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data\") pod 
\"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.054833 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-logs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055007 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055035 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055085 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055142 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsbrn\" (UniqueName: \"kubernetes.io/projected/722dcbf6-ae9d-444d-9839-24b0b0e900db-kube-api-access-qsbrn\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055170 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.055189 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.062898 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.093742 4693 scope.go:117] "RemoveContainer" containerID="c65da8152721e6d62dd7a5a2d4ccb755bd75ce02af2c4e5f93c220c8fb5e6c14" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.141107 4693 scope.go:117] "RemoveContainer" containerID="da291a940212e51760a6510d87012fd7883b06ac24252cef062ed4da78eec894" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.156979 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-qsbrn\" (UniqueName: \"kubernetes.io/projected/722dcbf6-ae9d-444d-9839-24b0b0e900db-kube-api-access-qsbrn\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157026 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157044 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157070 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157105 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-logs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157122 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-scripts\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157138 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-config-data\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.157159 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158152 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158187 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158210 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9jcf\" (UniqueName: \"kubernetes.io/projected/2586c0ae-8b14-49b8-8787-d14da284b596-kube-api-access-m9jcf\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158235 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158258 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158275 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rnlpw\" (UniqueName: \"kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158297 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158318 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158345 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158366 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-logs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158427 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " 
pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158445 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158478 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158510 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.158526 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.160363 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.170515 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-scripts\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.171388 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-config-data\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.171959 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.172212 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.172649 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.174027 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/722dcbf6-ae9d-444d-9839-24b0b0e900db-logs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.174762 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsbrn\" (UniqueName: \"kubernetes.io/projected/722dcbf6-ae9d-444d-9839-24b0b0e900db-kube-api-access-qsbrn\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.176646 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rnlpw\" (UniqueName: \"kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.179664 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.182543 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.188135 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.188783 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/722dcbf6-ae9d-444d-9839-24b0b0e900db-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.189667 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.191602 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4185887a-3a26-4abc-ab25-53033cc7e940" path="/var/lib/kubelet/pods/4185887a-3a26-4abc-ab25-53033cc7e940/volumes" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.210683 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="648ffab2-f448-4bb1-a53f-1178c8dd8c32" path="/var/lib/kubelet/pods/648ffab2-f448-4bb1-a53f-1178c8dd8c32/volumes" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.201324 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"glance-default-external-api-0\" (UID: \"722dcbf6-ae9d-444d-9839-24b0b0e900db\") " pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.198453 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts\") pod \"ceilometer-0\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") " pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.212184 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e33acce5-e1e0-4dd7-96d3-620949d2e6ed" path="/var/lib/kubelet/pods/e33acce5-e1e0-4dd7-96d3-620949d2e6ed/volumes" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.227083 4693 scope.go:117] "RemoveContainer" containerID="94e8580de619ea53071d021e883694ef8a97f2451521f94e5370f31688423352" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.246946 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.246995 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.247045 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.248329 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.248399 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f" gracePeriod=600 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259623 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259681 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259725 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259762 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-logs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259787 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259804 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9jcf\" (UniqueName: \"kubernetes.io/projected/2586c0ae-8b14-49b8-8787-d14da284b596-kube-api-access-m9jcf\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259826 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.259868 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.260473 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.261283 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-logs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.261685 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/2586c0ae-8b14-49b8-8787-d14da284b596-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.265223 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.266087 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.268706 4693 scope.go:117] "RemoveContainer" containerID="dc8bee7fa151c8e85e188e167b44e73d8073a1fc91f60e2f436bf44e61f7ba2a" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.269581 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-config-data\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.276437 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2586c0ae-8b14-49b8-8787-d14da284b596-scripts\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.277297 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9jcf\" (UniqueName: \"kubernetes.io/projected/2586c0ae-8b14-49b8-8787-d14da284b596-kube-api-access-m9jcf\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.298358 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"glance-default-internal-api-0\" (UID: \"2586c0ae-8b14-49b8-8787-d14da284b596\") " pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.382850 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.416705 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.430432 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.824337 4693 generic.go:334] "Generic (PLEG): container finished" podID="a9753c17-d077-44bf-a381-e3dd7e4aa505" containerID="53e25c4a4e828a177a7d68eecd24c0bec484459f2be4ade2f284123a29496550" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.824527 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2bbd-account-create-phtjw" event={"ID":"a9753c17-d077-44bf-a381-e3dd7e4aa505","Type":"ContainerDied","Data":"53e25c4a4e828a177a7d68eecd24c0bec484459f2be4ade2f284123a29496550"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.826446 4693 generic.go:334] "Generic (PLEG): container finished" podID="81fa2494-ad98-4781-8a71-e518b21b6509" containerID="3f48aa8689853c6727c5c10ae7db5dba6c46700d9df2d0fa9387b002de53823e" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.826556 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lm5mh" event={"ID":"81fa2494-ad98-4781-8a71-e518b21b6509","Type":"ContainerDied","Data":"3f48aa8689853c6727c5c10ae7db5dba6c46700d9df2d0fa9387b002de53823e"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.828215 4693 generic.go:334] "Generic (PLEG): container finished" podID="0e7e12bd-2fc6-445e-96bd-092859596ae4" containerID="d57ee3b2f84c37a6926ed4183effbada0363b5c94eccacadd3007d2801f9fb3b" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.828286 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hcwm4" event={"ID":"0e7e12bd-2fc6-445e-96bd-092859596ae4","Type":"ContainerDied","Data":"d57ee3b2f84c37a6926ed4183effbada0363b5c94eccacadd3007d2801f9fb3b"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.829650 4693 generic.go:334] "Generic (PLEG): container finished" podID="b6f9b111-6452-4462-9e5e-fe38c86e823b" containerID="d3d99b046f44ee66d016c53e6c4d3e99fd158b053d16d82f13f10d781b80ed7c" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.829690 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-269rq" event={"ID":"b6f9b111-6452-4462-9e5e-fe38c86e823b","Type":"ContainerDied","Data":"d3d99b046f44ee66d016c53e6c4d3e99fd158b053d16d82f13f10d781b80ed7c"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.830946 4693 generic.go:334] "Generic (PLEG): container finished" podID="eeb9054c-f2a0-4e1a-b95f-210fca359716" containerID="61a34cd6187c8d44e38b9f52f5df1af81419259841c18643e21ca3cbdeb86cf9" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.831004 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-dc3e-account-create-tplp4" event={"ID":"eeb9054c-f2a0-4e1a-b95f-210fca359716","Type":"ContainerDied","Data":"61a34cd6187c8d44e38b9f52f5df1af81419259841c18643e21ca3cbdeb86cf9"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.849543 4693 generic.go:334] "Generic (PLEG): container finished" podID="c6f93f55-4ccc-49d4-b377-5fe5c859dd38" containerID="eea75b40711b7b68512f654e7b7a0c0e8239c4d95d9aee8f21af98cd4fffb2d2" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.849624 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a227-account-create-zx7zl" event={"ID":"c6f93f55-4ccc-49d4-b377-5fe5c859dd38","Type":"ContainerDied","Data":"eea75b40711b7b68512f654e7b7a0c0e8239c4d95d9aee8f21af98cd4fffb2d2"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 
09:19:00.858448 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f" exitCode=0 Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.858531 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.858577 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf"} Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.858602 4693 scope.go:117] "RemoveContainer" containerID="fdd620c02223caa992b894c417a67e53ca6a47a2b100ed782945632677fd5de7" Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.864980 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: I1122 09:19:00.982223 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Nov 22 09:19:00 crc kubenswrapper[4693]: W1122 09:19:00.985257 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod722dcbf6_ae9d_444d_9839_24b0b0e900db.slice/crio-54801b9ed65c2f568fdcfbd31b1c1deee3fbae63eb50f68d6fae50d450b10130 WatchSource:0}: Error finding container 54801b9ed65c2f568fdcfbd31b1c1deee3fbae63eb50f68d6fae50d450b10130: Status 404 returned error can't find the container with id 54801b9ed65c2f568fdcfbd31b1c1deee3fbae63eb50f68d6fae50d450b10130 Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.072624 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Nov 22 09:19:01 crc kubenswrapper[4693]: W1122 09:19:01.082319 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2586c0ae_8b14_49b8_8787_d14da284b596.slice/crio-464b7f751bc50ea7813e204a02274c5bab0dc62164e1596ec49817a190eac8c6 WatchSource:0}: Error finding container 464b7f751bc50ea7813e204a02274c5bab0dc62164e1596ec49817a190eac8c6: Status 404 returned error can't find the container with id 464b7f751bc50ea7813e204a02274c5bab0dc62164e1596ec49817a190eac8c6 Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.237284 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.496737 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.497644 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-795d6c4bbf-rtw6h" Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.871000 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"722dcbf6-ae9d-444d-9839-24b0b0e900db","Type":"ContainerStarted","Data":"3148323dd98a818cbd4e5d31f40a5ef85caef627c72a518ab5e97cd3611b995f"} Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.871041 4693 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"722dcbf6-ae9d-444d-9839-24b0b0e900db","Type":"ContainerStarted","Data":"54801b9ed65c2f568fdcfbd31b1c1deee3fbae63eb50f68d6fae50d450b10130"} Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.874373 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2586c0ae-8b14-49b8-8787-d14da284b596","Type":"ContainerStarted","Data":"22ef6c67a2d3b18f877a4e7c877464b997b56e06015a4239736c9f326c506cc7"} Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.874417 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2586c0ae-8b14-49b8-8787-d14da284b596","Type":"ContainerStarted","Data":"464b7f751bc50ea7813e204a02274c5bab0dc62164e1596ec49817a190eac8c6"} Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.878546 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerStarted","Data":"f6d5d49b412b3d2e94d1b7c80cbba4efa857781d542aaedd2bba10427e200db9"} Nov 22 09:19:01 crc kubenswrapper[4693]: I1122 09:19:01.878804 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerStarted","Data":"e9d4bc5d8903b7d044a60f76a10a3fbefe7b32d157826ceeab9c37acd7b8b37f"} Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.245020 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2bbd-account-create-phtjw" Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.308026 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9dq5d\" (UniqueName: \"kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d\") pod \"a9753c17-d077-44bf-a381-e3dd7e4aa505\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.308291 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts\") pod \"a9753c17-d077-44bf-a381-e3dd7e4aa505\" (UID: \"a9753c17-d077-44bf-a381-e3dd7e4aa505\") " Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.309081 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a9753c17-d077-44bf-a381-e3dd7e4aa505" (UID: "a9753c17-d077-44bf-a381-e3dd7e4aa505"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.322181 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d" (OuterVolumeSpecName: "kube-api-access-9dq5d") pod "a9753c17-d077-44bf-a381-e3dd7e4aa505" (UID: "a9753c17-d077-44bf-a381-e3dd7e4aa505"). InnerVolumeSpecName "kube-api-access-9dq5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.390050 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-a227-account-create-zx7zl"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.409514 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts\") pod \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.409708 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kj92\" (UniqueName: \"kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92\") pod \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\" (UID: \"c6f93f55-4ccc-49d4-b377-5fe5c859dd38\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.410117 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9dq5d\" (UniqueName: \"kubernetes.io/projected/a9753c17-d077-44bf-a381-e3dd7e4aa505-kube-api-access-9dq5d\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.410138 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a9753c17-d077-44bf-a381-e3dd7e4aa505-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.410439 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c6f93f55-4ccc-49d4-b377-5fe5c859dd38" (UID: "c6f93f55-4ccc-49d4-b377-5fe5c859dd38"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.413936 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lm5mh"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.414540 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92" (OuterVolumeSpecName: "kube-api-access-4kj92") pod "c6f93f55-4ccc-49d4-b377-5fe5c859dd38" (UID: "c6f93f55-4ccc-49d4-b377-5fe5c859dd38"). InnerVolumeSpecName "kube-api-access-4kj92". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.416956 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-269rq"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.419477 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hcwm4"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.477560 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-dc3e-account-create-tplp4"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.510690 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts\") pod \"b6f9b111-6452-4462-9e5e-fe38c86e823b\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.510799 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pb85p\" (UniqueName: \"kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p\") pod \"81fa2494-ad98-4781-8a71-e518b21b6509\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.510879 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts\") pod \"eeb9054c-f2a0-4e1a-b95f-210fca359716\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.510949 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp6xq\" (UniqueName: \"kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq\") pod \"b6f9b111-6452-4462-9e5e-fe38c86e823b\" (UID: \"b6f9b111-6452-4462-9e5e-fe38c86e823b\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.510996 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts\") pod \"81fa2494-ad98-4781-8a71-e518b21b6509\" (UID: \"81fa2494-ad98-4781-8a71-e518b21b6509\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511024 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts\") pod \"0e7e12bd-2fc6-445e-96bd-092859596ae4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511079 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hzrwc\" (UniqueName: \"kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc\") pod \"eeb9054c-f2a0-4e1a-b95f-210fca359716\" (UID: \"eeb9054c-f2a0-4e1a-b95f-210fca359716\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511105 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gsc2f\" (UniqueName: \"kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f\") pod \"0e7e12bd-2fc6-445e-96bd-092859596ae4\" (UID: \"0e7e12bd-2fc6-445e-96bd-092859596ae4\") "
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511377 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b6f9b111-6452-4462-9e5e-fe38c86e823b" (UID: "b6f9b111-6452-4462-9e5e-fe38c86e823b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511933 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b6f9b111-6452-4462-9e5e-fe38c86e823b-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511957 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kj92\" (UniqueName: \"kubernetes.io/projected/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-kube-api-access-4kj92\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.511970 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c6f93f55-4ccc-49d4-b377-5fe5c859dd38-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.512147 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "81fa2494-ad98-4781-8a71-e518b21b6509" (UID: "81fa2494-ad98-4781-8a71-e518b21b6509"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.512156 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "eeb9054c-f2a0-4e1a-b95f-210fca359716" (UID: "eeb9054c-f2a0-4e1a-b95f-210fca359716"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.512592 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e7e12bd-2fc6-445e-96bd-092859596ae4" (UID: "0e7e12bd-2fc6-445e-96bd-092859596ae4"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.514316 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p" (OuterVolumeSpecName: "kube-api-access-pb85p") pod "81fa2494-ad98-4781-8a71-e518b21b6509" (UID: "81fa2494-ad98-4781-8a71-e518b21b6509"). InnerVolumeSpecName "kube-api-access-pb85p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.515736 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq" (OuterVolumeSpecName: "kube-api-access-mp6xq") pod "b6f9b111-6452-4462-9e5e-fe38c86e823b" (UID: "b6f9b111-6452-4462-9e5e-fe38c86e823b"). InnerVolumeSpecName "kube-api-access-mp6xq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.516879 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc" (OuterVolumeSpecName: "kube-api-access-hzrwc") pod "eeb9054c-f2a0-4e1a-b95f-210fca359716" (UID: "eeb9054c-f2a0-4e1a-b95f-210fca359716"). InnerVolumeSpecName "kube-api-access-hzrwc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.517129 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f" (OuterVolumeSpecName: "kube-api-access-gsc2f") pod "0e7e12bd-2fc6-445e-96bd-092859596ae4" (UID: "0e7e12bd-2fc6-445e-96bd-092859596ae4"). InnerVolumeSpecName "kube-api-access-gsc2f". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612494 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp6xq\" (UniqueName: \"kubernetes.io/projected/b6f9b111-6452-4462-9e5e-fe38c86e823b-kube-api-access-mp6xq\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612529 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/81fa2494-ad98-4781-8a71-e518b21b6509-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612539 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e7e12bd-2fc6-445e-96bd-092859596ae4-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612549 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hzrwc\" (UniqueName: \"kubernetes.io/projected/eeb9054c-f2a0-4e1a-b95f-210fca359716-kube-api-access-hzrwc\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612559 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gsc2f\" (UniqueName: \"kubernetes.io/projected/0e7e12bd-2fc6-445e-96bd-092859596ae4-kube-api-access-gsc2f\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612567 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pb85p\" (UniqueName: \"kubernetes.io/projected/81fa2494-ad98-4781-8a71-e518b21b6509-kube-api-access-pb85p\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.612575 4693 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/eeb9054c-f2a0-4e1a-b95f-210fca359716-operator-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.887362 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-dc3e-account-create-tplp4" event={"ID":"eeb9054c-f2a0-4e1a-b95f-210fca359716","Type":"ContainerDied","Data":"7e73315a13cc934060498acea5a4581e62f21229299ebfc0fb24d2f1beedf806"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.887438 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e73315a13cc934060498acea5a4581e62f21229299ebfc0fb24d2f1beedf806"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.887390 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-dc3e-account-create-tplp4"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.888751 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-2bbd-account-create-phtjw" event={"ID":"a9753c17-d077-44bf-a381-e3dd7e4aa505","Type":"ContainerDied","Data":"27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.888792 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="27a8ee30aca7a1495024dd5975114b09e388e5c8470921dd15bf0bdd59cd9963"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.888763 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-2bbd-account-create-phtjw"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.890641 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"2586c0ae-8b14-49b8-8787-d14da284b596","Type":"ContainerStarted","Data":"01be9dfc3bb6edc526b56cdced5cbc04fd88261cbf40c07e1f76e4778ff37354"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.898076 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"722dcbf6-ae9d-444d-9839-24b0b0e900db","Type":"ContainerStarted","Data":"c1b51e58b1d546de7b83aa1fc02286887c9ecf4afa72b917bd6ece01d3996ead"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.900563 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-269rq" event={"ID":"b6f9b111-6452-4462-9e5e-fe38c86e823b","Type":"ContainerDied","Data":"4e1e7c87ec618e6e2fc90953c2cd082e342e0b92f23397586dbdceeda7a71263"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.900663 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4e1e7c87ec618e6e2fc90953c2cd082e342e0b92f23397586dbdceeda7a71263"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.900995 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-269rq"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.903243 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-hcwm4" event={"ID":"0e7e12bd-2fc6-445e-96bd-092859596ae4","Type":"ContainerDied","Data":"e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.903355 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e9e34775714537b9d329f7eaff9a64d0ec24395be5a5e54b9590518ffac13301"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.903675 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-hcwm4"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.906161 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-a227-account-create-zx7zl" event={"ID":"c6f93f55-4ccc-49d4-b377-5fe5c859dd38","Type":"ContainerDied","Data":"be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.906273 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="be0029c978617bae49d02cd1b22a0c2000d0d05159febeeba9dab7705710ccde"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.906456 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-a227-account-create-zx7zl"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.911893 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerStarted","Data":"6f55f44d169037850b88c722410bae695c825d1e54b3a54bc6d9839550c0f766"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.915156 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-lm5mh" event={"ID":"81fa2494-ad98-4781-8a71-e518b21b6509","Type":"ContainerDied","Data":"b79e8784a84e2559e03f051da363a20d93ef54567d33466d4d5e37a3b129a1c9"}
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.915289 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b79e8784a84e2559e03f051da363a20d93ef54567d33466d4d5e37a3b129a1c9"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.915745 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-lm5mh"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.929676 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.929659565 podStartE2EDuration="3.929659565s" podCreationTimestamp="2025-11-22 09:18:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:02.925061467 +0000 UTC m=+939.067563758" watchObservedRunningTime="2025-11-22 09:19:02.929659565 +0000 UTC m=+939.072161856"
Nov 22 09:19:02 crc kubenswrapper[4693]: I1122 09:19:02.966167 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=3.966150886 podStartE2EDuration="3.966150886s" podCreationTimestamp="2025-11-22 09:18:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:02.955234194 +0000 UTC m=+939.097736485" watchObservedRunningTime="2025-11-22 09:19:02.966150886 +0000 UTC m=+939.108653177"
Nov 22 09:19:03 crc kubenswrapper[4693]: I1122 09:19:03.695241 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-67dc677d8b-gqzpn" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.146:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.146:8443: connect: connection refused"
Nov 22 09:19:03 crc kubenswrapper[4693]: I1122 09:19:03.695828 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-67dc677d8b-gqzpn"
Nov 22 09:19:03 crc kubenswrapper[4693]: I1122 09:19:03.925416 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerStarted","Data":"4dbdbfe7f3ca7ceec1ea48dec4e3ff689a758ae0ee308489c7c9c762f3d16a53"}
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.939323 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerStarted","Data":"ffda7bbc1acc956bc55df03f10a0adaa518e2fc3c081d63d42be56d57fa3c0d1"}
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.941047 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-central-agent" containerID="cri-o://f6d5d49b412b3d2e94d1b7c80cbba4efa857781d542aaedd2bba10427e200db9" gracePeriod=30
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.941430 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.941790 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="sg-core" containerID="cri-o://4dbdbfe7f3ca7ceec1ea48dec4e3ff689a758ae0ee308489c7c9c762f3d16a53" gracePeriod=30
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.941904 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="proxy-httpd" containerID="cri-o://ffda7bbc1acc956bc55df03f10a0adaa518e2fc3c081d63d42be56d57fa3c0d1" gracePeriod=30
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.942076 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-notification-agent" containerID="cri-o://6f55f44d169037850b88c722410bae695c825d1e54b3a54bc6d9839550c0f766" gracePeriod=30
Nov 22 09:19:04 crc kubenswrapper[4693]: I1122 09:19:04.966002 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.445062824 podStartE2EDuration="5.965984737s" podCreationTimestamp="2025-11-22 09:18:59 +0000 UTC" firstStartedPulling="2025-11-22 09:19:00.894955933 +0000 UTC m=+937.037458214" lastFinishedPulling="2025-11-22 09:19:04.415877836 +0000 UTC m=+940.558380127" observedRunningTime="2025-11-22 09:19:04.960719454 +0000 UTC m=+941.103221745" watchObservedRunningTime="2025-11-22 09:19:04.965984737 +0000 UTC m=+941.108487028"
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952152 4693 generic.go:334] "Generic (PLEG): container finished" podID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerID="ffda7bbc1acc956bc55df03f10a0adaa518e2fc3c081d63d42be56d57fa3c0d1" exitCode=0
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952445 4693 generic.go:334] "Generic (PLEG): container finished" podID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerID="4dbdbfe7f3ca7ceec1ea48dec4e3ff689a758ae0ee308489c7c9c762f3d16a53" exitCode=2
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952455 4693 generic.go:334] "Generic (PLEG): container finished" podID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerID="6f55f44d169037850b88c722410bae695c825d1e54b3a54bc6d9839550c0f766" exitCode=0
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952227 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerDied","Data":"ffda7bbc1acc956bc55df03f10a0adaa518e2fc3c081d63d42be56d57fa3c0d1"}
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952501 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerDied","Data":"4dbdbfe7f3ca7ceec1ea48dec4e3ff689a758ae0ee308489c7c9c762f3d16a53"}
Nov 22 09:19:05 crc kubenswrapper[4693]: I1122 09:19:05.952522 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerDied","Data":"6f55f44d169037850b88c722410bae695c825d1e54b3a54bc6d9839550c0f766"}
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063206 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8g5n7"]
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063665 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e7e12bd-2fc6-445e-96bd-092859596ae4" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063682 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e7e12bd-2fc6-445e-96bd-092859596ae4" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063710 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eeb9054c-f2a0-4e1a-b95f-210fca359716" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063716 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="eeb9054c-f2a0-4e1a-b95f-210fca359716" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063731 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b6f9b111-6452-4462-9e5e-fe38c86e823b" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063737 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b6f9b111-6452-4462-9e5e-fe38c86e823b" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063747 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c6f93f55-4ccc-49d4-b377-5fe5c859dd38" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063752 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c6f93f55-4ccc-49d4-b377-5fe5c859dd38" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063765 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9753c17-d077-44bf-a381-e3dd7e4aa505" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063771 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9753c17-d077-44bf-a381-e3dd7e4aa505" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: E1122 09:19:06.063795 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81fa2494-ad98-4781-8a71-e518b21b6509" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.063801 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="81fa2494-ad98-4781-8a71-e518b21b6509" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064022 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9753c17-d077-44bf-a381-e3dd7e4aa505" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064037 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b6f9b111-6452-4462-9e5e-fe38c86e823b" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064045 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e7e12bd-2fc6-445e-96bd-092859596ae4" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064063 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c6f93f55-4ccc-49d4-b377-5fe5c859dd38" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064072 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="eeb9054c-f2a0-4e1a-b95f-210fca359716" containerName="mariadb-account-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064092 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="81fa2494-ad98-4781-8a71-e518b21b6509" containerName="mariadb-database-create"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.064824 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.066940 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.071143 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-f7hmr"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.071395 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.085809 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8g5n7"]
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.191887 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.191961 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.192049 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.192159 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4qmx\" (UniqueName: \"kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.294654 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.294702 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.294775 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4qmx\" (UniqueName: \"kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.294934 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.300922 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.311602 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4qmx\" (UniqueName: \"kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.312117 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.318099 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-8g5n7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.381755 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8g5n7"
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.808428 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8g5n7"]
Nov 22 09:19:06 crc kubenswrapper[4693]: I1122 09:19:06.961664 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" event={"ID":"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7","Type":"ContainerStarted","Data":"e237a76b73377b5ca3f6bba2f1e3dff3e44f5ca19e74ce000abbeadd47a0a3cb"}
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.914692 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67dc677d8b-gqzpn"
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.973436 4693 generic.go:334] "Generic (PLEG): container finished" podID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerID="8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82" exitCode=137
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.973490 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerDied","Data":"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"}
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.973515 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-67dc677d8b-gqzpn" event={"ID":"77e4ba75-fe85-4b6f-8946-30ab162512bc","Type":"ContainerDied","Data":"fdab056b703451216023b15dadb963adc18f55552853ce193b79a980fb99e1f8"}
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.973532 4693 scope.go:117] "RemoveContainer" containerID="fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.973637 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-67dc677d8b-gqzpn"
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.980960 4693 generic.go:334] "Generic (PLEG): container finished" podID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerID="f6d5d49b412b3d2e94d1b7c80cbba4efa857781d542aaedd2bba10427e200db9" exitCode=0
Nov 22 09:19:07 crc kubenswrapper[4693]: I1122 09:19:07.980985 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerDied","Data":"f6d5d49b412b3d2e94d1b7c80cbba4efa857781d542aaedd2bba10427e200db9"}
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.017839 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030121 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030167 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030194 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030223 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9kd4\" (UniqueName: \"kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030317 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030414 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.030476 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs\") pod \"77e4ba75-fe85-4b6f-8946-30ab162512bc\" (UID: \"77e4ba75-fe85-4b6f-8946-30ab162512bc\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.031328 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs" (OuterVolumeSpecName: "logs") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.045018 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.047392 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4" (OuterVolumeSpecName: "kube-api-access-q9kd4") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "kube-api-access-q9kd4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.058606 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts" (OuterVolumeSpecName: "scripts") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.084636 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.086395 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data" (OuterVolumeSpecName: "config-data") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.096506 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "77e4ba75-fe85-4b6f-8946-30ab162512bc" (UID: "77e4ba75-fe85-4b6f-8946-30ab162512bc"). InnerVolumeSpecName "horizon-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.131961 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnlpw\" (UniqueName: \"kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132273 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132515 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132605 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132666 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132701 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.132723 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml\") pod \"0277864e-ceab-47f1-adbe-daebc97e97ba\" (UID: \"0277864e-ceab-47f1-adbe-daebc97e97ba\") "
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133113 4693 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-secret-key\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133130 4693 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-horizon-tls-certs\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133139 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/77e4ba75-fe85-4b6f-8946-30ab162512bc-logs\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133183 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133193 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/77e4ba75-fe85-4b6f-8946-30ab162512bc-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133201 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/77e4ba75-fe85-4b6f-8946-30ab162512bc-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133209 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9kd4\" (UniqueName: \"kubernetes.io/projected/77e4ba75-fe85-4b6f-8946-30ab162512bc-kube-api-access-q9kd4\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133135 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.133944 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.135937 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw" (OuterVolumeSpecName: "kube-api-access-rnlpw") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "kube-api-access-rnlpw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.136276 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts" (OuterVolumeSpecName: "scripts") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.152877 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.187386 4693 scope.go:117] "RemoveContainer" containerID="8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.210525 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.211742 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data" (OuterVolumeSpecName: "config-data") pod "0277864e-ceab-47f1-adbe-daebc97e97ba" (UID: "0277864e-ceab-47f1-adbe-daebc97e97ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.216528 4693 scope.go:117] "RemoveContainer" containerID="fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"
Nov 22 09:19:08 crc kubenswrapper[4693]: E1122 09:19:08.216939 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30\": container with ID starting with fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30 not found: ID does not exist" containerID="fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.216980 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30"} err="failed to get container status \"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30\": rpc error: code = NotFound desc = could not find container \"fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30\": container with ID starting with fdcff0ff3ab9afbd5987c90152cc3c1156c9351372540b25531b45401d316c30 not found: ID does not exist"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.217004 4693 scope.go:117] "RemoveContainer" containerID="8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"
Nov 22 09:19:08 crc kubenswrapper[4693]: E1122 09:19:08.217985 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82\": container with ID starting with 8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82 not found: ID does not exist" containerID="8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.218054 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82"} err="failed to get container status \"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82\": rpc error: code = NotFound desc = could not find container \"8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82\": container with ID starting with 8989c1a6e52824566ec0cfae43eea78e6d98942556d8f19d6eca9afc3f2a6b82 not found: ID does not exist"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235672 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnlpw\" (UniqueName: \"kubernetes.io/projected/0277864e-ceab-47f1-adbe-daebc97e97ba-kube-api-access-rnlpw\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235699 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235709 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-run-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235721 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0277864e-ceab-47f1-adbe-daebc97e97ba-log-httpd\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235730 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235738 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.235746 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0277864e-ceab-47f1-adbe-daebc97e97ba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.295679 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"]
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.307105 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-67dc677d8b-gqzpn"]
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.995237 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0277864e-ceab-47f1-adbe-daebc97e97ba","Type":"ContainerDied","Data":"e9d4bc5d8903b7d044a60f76a10a3fbefe7b32d157826ceeab9c37acd7b8b37f"}
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.995293 4693 scope.go:117] "RemoveContainer" containerID="ffda7bbc1acc956bc55df03f10a0adaa518e2fc3c081d63d42be56d57fa3c0d1"
Nov 22 09:19:08 crc kubenswrapper[4693]: I1122 09:19:08.995297 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.016913 4693 scope.go:117] "RemoveContainer" containerID="4dbdbfe7f3ca7ceec1ea48dec4e3ff689a758ae0ee308489c7c9c762f3d16a53"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.022523 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.031087 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.043866 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044304 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="sg-core"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044322 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="sg-core"
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044330 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon-log"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044336 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon-log"
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044358 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-central-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044365 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-central-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044374 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-notification-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044379 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-notification-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044389 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044395 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon"
Nov 22 09:19:09 crc kubenswrapper[4693]: E1122 09:19:09.044407 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="proxy-httpd"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044412 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="proxy-httpd"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044562 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon-log"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044571 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-central-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044579 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="sg-core"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044592 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="ceilometer-notification-agent"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044600 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" containerName="horizon"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.044610 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" containerName="proxy-httpd"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.046141 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.046979 4693 scope.go:117] "RemoveContainer" containerID="6f55f44d169037850b88c722410bae695c825d1e54b3a54bc6d9839550c0f766"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.048700 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.055550 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.056524 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.071891 4693 scope.go:117] "RemoveContainer" containerID="f6d5d49b412b3d2e94d1b7c80cbba4efa857781d542aaedd2bba10427e200db9"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157785 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157855 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch4ns\" (UniqueName: \"kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157900 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157937 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157965 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.157996 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.158035 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259178 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259210 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259256 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch4ns\" (UniqueName: \"kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259290 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259325 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259364 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.259396 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.260661 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.260677 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.263888 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.264979 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.273882 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.274242 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.274881 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch4ns\" (UniqueName: \"kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns\") pod \"ceilometer-0\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.369421 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Nov 22 09:19:09 crc kubenswrapper[4693]: I1122 09:19:09.775505 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.007121 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerStarted","Data":"c1ad0a47dbd087023aef0cb5073ea2b784086bf905e35ffbd8a5a7d5dda68892"}
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.154788 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0277864e-ceab-47f1-adbe-daebc97e97ba" path="/var/lib/kubelet/pods/0277864e-ceab-47f1-adbe-daebc97e97ba/volumes"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.156009 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="77e4ba75-fe85-4b6f-8946-30ab162512bc" path="/var/lib/kubelet/pods/77e4ba75-fe85-4b6f-8946-30ab162512bc/volumes"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.383019 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.383064 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.411323 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.419479 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.430624 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.430655 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.469783 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:10 crc kubenswrapper[4693]: I1122 09:19:10.479355 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.018449 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerStarted","Data":"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92"}
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.018507 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.020198 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.020229 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.020239 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Nov 22 09:19:11 crc kubenswrapper[4693]: I1122 09:19:11.922902 4693 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:12 crc kubenswrapper[4693]: I1122 09:19:12.025964 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerStarted","Data":"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a"} Nov 22 09:19:12 crc kubenswrapper[4693]: I1122 09:19:12.695621 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 09:19:12 crc kubenswrapper[4693]: I1122 09:19:12.708204 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Nov 22 09:19:12 crc kubenswrapper[4693]: I1122 09:19:12.889479 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 09:19:12 crc kubenswrapper[4693]: I1122 09:19:12.917705 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Nov 22 09:19:17 crc kubenswrapper[4693]: I1122 09:19:17.066426 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" event={"ID":"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7","Type":"ContainerStarted","Data":"c69d165f5c92a795d067c3eadc5f9d8f4fa0cf6bc95e8140e5f54fe920bb3be1"} Nov 22 09:19:17 crc kubenswrapper[4693]: I1122 09:19:17.069114 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerStarted","Data":"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb"} Nov 22 09:19:17 crc kubenswrapper[4693]: I1122 09:19:17.087781 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" podStartSLOduration=1.099949385 podStartE2EDuration="11.087771336s" podCreationTimestamp="2025-11-22 09:19:06 +0000 UTC" firstStartedPulling="2025-11-22 09:19:06.815454587 +0000 UTC m=+942.957956879" lastFinishedPulling="2025-11-22 09:19:16.803276538 +0000 UTC m=+952.945778830" observedRunningTime="2025-11-22 09:19:17.081786549 +0000 UTC m=+953.224288840" watchObservedRunningTime="2025-11-22 09:19:17.087771336 +0000 UTC m=+953.230273626" Nov 22 09:19:19 crc kubenswrapper[4693]: I1122 09:19:19.083095 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerStarted","Data":"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e"} Nov 22 09:19:19 crc kubenswrapper[4693]: I1122 09:19:19.083404 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-central-agent" containerID="cri-o://c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92" gracePeriod=30 Nov 22 09:19:19 crc kubenswrapper[4693]: I1122 09:19:19.083569 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 09:19:19 crc kubenswrapper[4693]: I1122 09:19:19.083637 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="proxy-httpd" containerID="cri-o://f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e" gracePeriod=30 Nov 22 09:19:19 crc 
kubenswrapper[4693]: I1122 09:19:19.083747 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="sg-core" containerID="cri-o://fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb" gracePeriod=30 Nov 22 09:19:19 crc kubenswrapper[4693]: I1122 09:19:19.083816 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-notification-agent" containerID="cri-o://06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a" gracePeriod=30 Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092140 4693 generic.go:334] "Generic (PLEG): container finished" podID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerID="f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e" exitCode=0 Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092435 4693 generic.go:334] "Generic (PLEG): container finished" podID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerID="fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb" exitCode=2 Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092444 4693 generic.go:334] "Generic (PLEG): container finished" podID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerID="c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92" exitCode=0 Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092214 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerDied","Data":"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e"} Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092479 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerDied","Data":"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb"} Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.092493 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerDied","Data":"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92"} Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.328243 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375090 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375189 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375213 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375235 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375323 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375447 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch4ns\" (UniqueName: \"kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375483 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd\") pod \"29abb84d-a3e1-4aa2-8056-b941aa84658c\" (UID: \"29abb84d-a3e1-4aa2-8056-b941aa84658c\") " Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.375901 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.376508 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.381411 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns" (OuterVolumeSpecName: "kube-api-access-ch4ns") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "kube-api-access-ch4ns". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.381513 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts" (OuterVolumeSpecName: "scripts") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.396474 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.427199 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.455316 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data" (OuterVolumeSpecName: "config-data") pod "29abb84d-a3e1-4aa2-8056-b941aa84658c" (UID: "29abb84d-a3e1-4aa2-8056-b941aa84658c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477918 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch4ns\" (UniqueName: \"kubernetes.io/projected/29abb84d-a3e1-4aa2-8056-b941aa84658c-kube-api-access-ch4ns\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477941 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477951 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/29abb84d-a3e1-4aa2-8056-b941aa84658c-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477960 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477968 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477977 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:20 crc kubenswrapper[4693]: I1122 09:19:20.477984 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/29abb84d-a3e1-4aa2-8056-b941aa84658c-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.102106 4693 generic.go:334] "Generic (PLEG): container finished" podID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerID="06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a" exitCode=0 Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.102145 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerDied","Data":"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a"} Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.102158 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.102175 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"29abb84d-a3e1-4aa2-8056-b941aa84658c","Type":"ContainerDied","Data":"c1ad0a47dbd087023aef0cb5073ea2b784086bf905e35ffbd8a5a7d5dda68892"} Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.102192 4693 scope.go:117] "RemoveContainer" containerID="f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.119780 4693 scope.go:117] "RemoveContainer" containerID="fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.124233 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.128900 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.139229 4693 scope.go:117] "RemoveContainer" containerID="06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.147932 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.148735 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="proxy-httpd" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.148754 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="proxy-httpd" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.148771 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="sg-core" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.148789 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="sg-core" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.148815 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-notification-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.148821 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-notification-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.148854 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-central-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.148862 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-central-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.149207 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="sg-core" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.149230 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-notification-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.149246 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="proxy-httpd" Nov 
22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.149272 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" containerName="ceilometer-central-agent" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.152454 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.155996 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.156073 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.161725 4693 scope.go:117] "RemoveContainer" containerID="c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.175932 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.180320 4693 scope.go:117] "RemoveContainer" containerID="f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.180674 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e\": container with ID starting with f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e not found: ID does not exist" containerID="f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.180710 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e"} err="failed to get container status \"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e\": rpc error: code = NotFound desc = could not find container \"f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e\": container with ID starting with f45e0a3e0343aecbfb51eae02d5836672ef7dfdd66630a4994a3dddfcc92609e not found: ID does not exist" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.180729 4693 scope.go:117] "RemoveContainer" containerID="fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.181004 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb\": container with ID starting with fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb not found: ID does not exist" containerID="fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.181025 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb"} err="failed to get container status \"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb\": rpc error: code = NotFound desc = could not find container \"fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb\": container with ID starting with fff827c29d5d0a2b1e2a4d62c2e74c0ef1907229c8b1353128380ea5d6836ccb not found: ID does not exist" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 
09:19:21.181036 4693 scope.go:117] "RemoveContainer" containerID="06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.181311 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a\": container with ID starting with 06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a not found: ID does not exist" containerID="06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.181338 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a"} err="failed to get container status \"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a\": rpc error: code = NotFound desc = could not find container \"06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a\": container with ID starting with 06c941745b4c62aad8026a284e23f9b8be785a346ad69d9be820350b6c646e0a not found: ID does not exist" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.181351 4693 scope.go:117] "RemoveContainer" containerID="c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92" Nov 22 09:19:21 crc kubenswrapper[4693]: E1122 09:19:21.181532 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92\": container with ID starting with c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92 not found: ID does not exist" containerID="c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.181563 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92"} err="failed to get container status \"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92\": rpc error: code = NotFound desc = could not find container \"c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92\": container with ID starting with c773ebc8067ec826842cf5a2784be53d9f66e83e2be881efec88e4ecef2a1f92 not found: ID does not exist" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290124 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290173 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290196 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 
crc kubenswrapper[4693]: I1122 09:19:21.290293 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290313 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290383 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnnq8\" (UniqueName: \"kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.290451 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391010 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnnq8\" (UniqueName: \"kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391056 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391135 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391162 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391181 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391205 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: 
\"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391718 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.391970 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.392031 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.396357 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.396510 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.396782 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.397509 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.403974 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mnnq8\" (UniqueName: \"kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8\") pod \"ceilometer-0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.480669 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:21 crc kubenswrapper[4693]: I1122 09:19:21.843601 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:21 crc kubenswrapper[4693]: W1122 09:19:21.845193 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4631c5a1_a696_4b97_bcd8_c2f5330b31f0.slice/crio-42a15390e513941c0ff7977f988b7325a99da37d781121f3e607dbf17924ba34 WatchSource:0}: Error finding container 42a15390e513941c0ff7977f988b7325a99da37d781121f3e607dbf17924ba34: Status 404 returned error can't find the container with id 42a15390e513941c0ff7977f988b7325a99da37d781121f3e607dbf17924ba34 Nov 22 09:19:22 crc kubenswrapper[4693]: I1122 09:19:22.112021 4693 generic.go:334] "Generic (PLEG): container finished" podID="d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" containerID="c69d165f5c92a795d067c3eadc5f9d8f4fa0cf6bc95e8140e5f54fe920bb3be1" exitCode=0 Nov 22 09:19:22 crc kubenswrapper[4693]: I1122 09:19:22.112139 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" event={"ID":"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7","Type":"ContainerDied","Data":"c69d165f5c92a795d067c3eadc5f9d8f4fa0cf6bc95e8140e5f54fe920bb3be1"} Nov 22 09:19:22 crc kubenswrapper[4693]: I1122 09:19:22.115088 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerStarted","Data":"42a15390e513941c0ff7977f988b7325a99da37d781121f3e607dbf17924ba34"} Nov 22 09:19:22 crc kubenswrapper[4693]: I1122 09:19:22.154829 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29abb84d-a3e1-4aa2-8056-b941aa84658c" path="/var/lib/kubelet/pods/29abb84d-a3e1-4aa2-8056-b941aa84658c/volumes" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.130373 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerStarted","Data":"6c25848e779b80c20dec2e49feacb88fc030de2be00fa95d42ed95a8e9370f64"} Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.131020 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerStarted","Data":"86aefc698cf1649569b36e22daea9185d26af25c491ff570c499c58c999a088e"} Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.384723 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.525710 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data\") pod \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.525861 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle\") pod \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.525903 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts\") pod \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.525966 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4qmx\" (UniqueName: \"kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx\") pod \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\" (UID: \"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7\") " Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.531302 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts" (OuterVolumeSpecName: "scripts") pod "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" (UID: "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.531688 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx" (OuterVolumeSpecName: "kube-api-access-d4qmx") pod "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" (UID: "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7"). InnerVolumeSpecName "kube-api-access-d4qmx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.546520 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data" (OuterVolumeSpecName: "config-data") pod "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" (UID: "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.552438 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" (UID: "d8877d22-10d7-4304-8edc-7b0cfbf7f6c7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.628892 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.628917 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.628944 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4qmx\" (UniqueName: \"kubernetes.io/projected/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-kube-api-access-d4qmx\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:23 crc kubenswrapper[4693]: I1122 09:19:23.628953 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.146654 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.167067 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-8g5n7" event={"ID":"d8877d22-10d7-4304-8edc-7b0cfbf7f6c7","Type":"ContainerDied","Data":"e237a76b73377b5ca3f6bba2f1e3dff3e44f5ca19e74ce000abbeadd47a0a3cb"} Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.167118 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e237a76b73377b5ca3f6bba2f1e3dff3e44f5ca19e74ce000abbeadd47a0a3cb" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.167131 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerStarted","Data":"edf9f226646ba2ecdb8e63f567ef5a7e23dd6d22d7b3327bfb57bee50e5caf9a"} Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.215316 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 09:19:24 crc kubenswrapper[4693]: E1122 09:19:24.220493 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" containerName="nova-cell0-conductor-db-sync" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.220524 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" containerName="nova-cell0-conductor-db-sync" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.221092 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" containerName="nova-cell0-conductor-db-sync" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.225400 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.227585 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.235616 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-f7hmr" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.235742 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.345342 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.345405 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.345444 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjjr2\" (UniqueName: \"kubernetes.io/projected/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-kube-api-access-vjjr2\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.446591 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.446666 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.446723 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjjr2\" (UniqueName: \"kubernetes.io/projected/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-kube-api-access-vjjr2\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.457119 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.457210 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-config-data\") pod \"nova-cell0-conductor-0\" 
(UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.458920 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjjr2\" (UniqueName: \"kubernetes.io/projected/a4da75dc-7806-41ea-9fdd-d7ed1ca362d3-kube-api-access-vjjr2\") pod \"nova-cell0-conductor-0\" (UID: \"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3\") " pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.547271 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:24 crc kubenswrapper[4693]: I1122 09:19:24.929852 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Nov 22 09:19:24 crc kubenswrapper[4693]: W1122 09:19:24.933378 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4da75dc_7806_41ea_9fdd_d7ed1ca362d3.slice/crio-e8d8214a05421fb252d983128a3b0a70821f6c4b10f2ad1b10124c19a70678c0 WatchSource:0}: Error finding container e8d8214a05421fb252d983128a3b0a70821f6c4b10f2ad1b10124c19a70678c0: Status 404 returned error can't find the container with id e8d8214a05421fb252d983128a3b0a70821f6c4b10f2ad1b10124c19a70678c0 Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.166453 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3","Type":"ContainerStarted","Data":"7d57bac5dfbf1422fb650af39542579c2c580a0589483414706be776181e62fe"} Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.166773 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.166787 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"a4da75dc-7806-41ea-9fdd-d7ed1ca362d3","Type":"ContainerStarted","Data":"e8d8214a05421fb252d983128a3b0a70821f6c4b10f2ad1b10124c19a70678c0"} Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.168461 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerStarted","Data":"a8b67b5aa1bcf6f834857569b3519054893bd6bac1acd48be5af816e126c0dfa"} Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.168578 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.178686 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=1.178671188 podStartE2EDuration="1.178671188s" podCreationTimestamp="2025-11-22 09:19:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:25.177670356 +0000 UTC m=+961.320172647" watchObservedRunningTime="2025-11-22 09:19:25.178671188 +0000 UTC m=+961.321173478" Nov 22 09:19:25 crc kubenswrapper[4693]: I1122 09:19:25.198286 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.193128675 podStartE2EDuration="4.198272644s" podCreationTimestamp="2025-11-22 09:19:21 +0000 UTC" firstStartedPulling="2025-11-22 09:19:21.847302288 +0000 UTC m=+957.989804579" 
lastFinishedPulling="2025-11-22 09:19:24.852446257 +0000 UTC m=+960.994948548" observedRunningTime="2025-11-22 09:19:25.190665727 +0000 UTC m=+961.333168018" watchObservedRunningTime="2025-11-22 09:19:25.198272644 +0000 UTC m=+961.340774935" Nov 22 09:19:34 crc kubenswrapper[4693]: I1122 09:19:34.574197 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.003231 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-ljh2g"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.006449 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.012720 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.015710 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.038875 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ljh2g"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.100430 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.102008 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.105302 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.111752 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.111919 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.111984 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tr5v\" (UniqueName: \"kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.112008 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.114588 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:19:35 crc 
kubenswrapper[4693]: I1122 09:19:35.165622 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.167314 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.182258 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.207117 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216743 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216776 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216797 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216814 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216855 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216891 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-94kkx\" (UniqueName: \"kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216908 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7k49g\" (UniqueName: \"kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.216969 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts\") pod 
\"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.217000 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tr5v\" (UniqueName: \"kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.217019 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.217033 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.217053 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.225947 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.230216 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.231336 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.243754 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.250337 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.253479 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tr5v\" (UniqueName: \"kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v\") pod \"nova-cell0-cell-mapping-ljh2g\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") " pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.270889 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.283146 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.284575 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.288564 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.303876 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.305463 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.308410 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.322983 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323102 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323158 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323184 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323210 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323521 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323591 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323726 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323758 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjvqt\" (UniqueName: \"kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323889 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323910 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.323926 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324084 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324109 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324127 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fl52z\" (UniqueName: \"kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " 
pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324149 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324166 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-94kkx\" (UniqueName: \"kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324185 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7k49g\" (UniqueName: \"kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.324198 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.325023 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.335460 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.336336 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.338654 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.338976 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-94kkx\" (UniqueName: \"kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx\") pod \"nova-api-0\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") " pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.339024 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.339122 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ljh2g" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.344439 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7k49g\" (UniqueName: \"kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.358525 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data\") pod \"nova-metadata-0\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426377 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426427 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjvqt\" (UniqueName: \"kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426464 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426488 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426512 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426547 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fl52z\" (UniqueName: \"kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426571 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426591 4693 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxwzk\" (UniqueName: \"kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426631 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426667 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426686 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.426711 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.427452 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.428724 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.429249 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.430132 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.430522 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.430967 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.431955 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.435701 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.446152 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fl52z\" (UniqueName: \"kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z\") pod \"nova-scheduler-0\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.451197 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjvqt\" (UniqueName: \"kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt\") pod \"dnsmasq-dns-64dbf5859c-znqsl\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.508921 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.527954 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.527999 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.528081 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxwzk\" (UniqueName: \"kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.535338 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.542601 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxwzk\" (UniqueName: \"kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.546828 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.630140 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.693705 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.725987 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.798873 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-ljh2g"] Nov 22 09:19:35 crc kubenswrapper[4693]: W1122 09:19:35.830566 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7c78d850_6c37_4f3b_920c_d64e4c136e47.slice/crio-b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab WatchSource:0}: Error finding container b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab: Status 404 returned error can't find the container with id b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.908867 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tg5lr"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.910253 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.912781 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.913015 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.927320 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:19:35 crc kubenswrapper[4693]: I1122 09:19:35.932810 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tg5lr"] Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.012505 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:36 crc kubenswrapper[4693]: W1122 09:19:36.015510 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16b10e86_b376_49d8_825e_219b1e61e7ee.slice/crio-715ab217a2c19ba491d658b41c576c70726825cd53fa8f19c2389995cdb6c4af WatchSource:0}: Error finding container 715ab217a2c19ba491d658b41c576c70726825cd53fa8f19c2389995cdb6c4af: Status 404 returned error can't find the container with id 715ab217a2c19ba491d658b41c576c70726825cd53fa8f19c2389995cdb6c4af Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.045677 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.045759 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ds8v\" (UniqueName: \"kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.046138 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.046343 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: W1122 09:19:36.146666 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcec12aaf_a087_48db_b983_9d75767c38d1.slice/crio-c32389d2f5c246cac321e61f4ab66390989305667100a4ae26f2497267890e88 WatchSource:0}: Error finding container c32389d2f5c246cac321e61f4ab66390989305667100a4ae26f2497267890e88: Status 404 returned error can't find the container with id c32389d2f5c246cac321e61f4ab66390989305667100a4ae26f2497267890e88 Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.147631 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.147712 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.147740 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ds8v\" (UniqueName: \"kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.147814 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.152242 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.152407 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc 
kubenswrapper[4693]: I1122 09:19:36.152507 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.156256 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.162780 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ds8v\" (UniqueName: \"kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v\") pod \"nova-cell1-conductor-db-sync-tg5lr\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.228196 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.240125 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:36 crc kubenswrapper[4693]: W1122 09:19:36.252924 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podbed13960_e242_4593_a44d_da28a74d4b76.slice/crio-1255038eb1df14d461b9ce86a27742a3bf94faee1095f8416ba47f6768684823 WatchSource:0}: Error finding container 1255038eb1df14d461b9ce86a27742a3bf94faee1095f8416ba47f6768684823: Status 404 returned error can't find the container with id 1255038eb1df14d461b9ce86a27742a3bf94faee1095f8416ba47f6768684823 Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.301617 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.314973 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ljh2g" event={"ID":"7c78d850-6c37-4f3b-920c-d64e4c136e47","Type":"ContainerStarted","Data":"edc14c25cacc83c8e05fa203ad44366adb22cef814080ee11e22e32af667b51a"} Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.315133 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ljh2g" event={"ID":"7c78d850-6c37-4f3b-920c-d64e4c136e47","Type":"ContainerStarted","Data":"b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab"} Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.317561 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerStarted","Data":"ccc7b4182aee7eebe25845a5ba6df55e357bb90a47612548030958eee9f111b8"} Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.325126 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" event={"ID":"cec12aaf-a087-48db-b983-9d75767c38d1","Type":"ContainerStarted","Data":"c32389d2f5c246cac321e61f4ab66390989305667100a4ae26f2497267890e88"} Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.333057 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerStarted","Data":"715ab217a2c19ba491d658b41c576c70726825cd53fa8f19c2389995cdb6c4af"} Nov 22 09:19:36 
crc kubenswrapper[4693]: I1122 09:19:36.333353 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-ljh2g" podStartSLOduration=2.333336823 podStartE2EDuration="2.333336823s" podCreationTimestamp="2025-11-22 09:19:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:36.329383567 +0000 UTC m=+972.471885858" watchObservedRunningTime="2025-11-22 09:19:36.333336823 +0000 UTC m=+972.475839114" Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.348695 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bed13960-e242-4593-a44d-da28a74d4b76","Type":"ContainerStarted","Data":"1255038eb1df14d461b9ce86a27742a3bf94faee1095f8416ba47f6768684823"} Nov 22 09:19:36 crc kubenswrapper[4693]: I1122 09:19:36.659320 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tg5lr"] Nov 22 09:19:36 crc kubenswrapper[4693]: W1122 09:19:36.667375 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod310ec6d0_8daa_42c9_ac3e_96f791743124.slice/crio-706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629 WatchSource:0}: Error finding container 706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629: Status 404 returned error can't find the container with id 706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629 Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.357016 4693 generic.go:334] "Generic (PLEG): container finished" podID="cec12aaf-a087-48db-b983-9d75767c38d1" containerID="d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3" exitCode=0 Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.357331 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" event={"ID":"cec12aaf-a087-48db-b983-9d75767c38d1","Type":"ContainerDied","Data":"d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3"} Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.360922 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" event={"ID":"310ec6d0-8daa-42c9-ac3e-96f791743124","Type":"ContainerStarted","Data":"e2132bbd7701e65be6d6136f4683600c378e044e916742cd455bd97bef24f4d9"} Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.360958 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" event={"ID":"310ec6d0-8daa-42c9-ac3e-96f791743124","Type":"ContainerStarted","Data":"706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629"} Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.364448 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10ad3952-c878-4643-ab1d-af0376d52101","Type":"ContainerStarted","Data":"b3b61fe5acd4a72bde2b732272d3a93918055e6f2ab308062ede3f77880bf598"} Nov 22 09:19:37 crc kubenswrapper[4693]: I1122 09:19:37.403123 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" podStartSLOduration=2.403105863 podStartE2EDuration="2.403105863s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:37.399128943 +0000 
UTC m=+973.541631234" watchObservedRunningTime="2025-11-22 09:19:37.403105863 +0000 UTC m=+973.545608154" Nov 22 09:19:38 crc kubenswrapper[4693]: I1122 09:19:38.373968 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" event={"ID":"cec12aaf-a087-48db-b983-9d75767c38d1","Type":"ContainerStarted","Data":"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc"} Nov 22 09:19:38 crc kubenswrapper[4693]: I1122 09:19:38.392587 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" podStartSLOduration=3.3925726640000002 podStartE2EDuration="3.392572664s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:38.385767636 +0000 UTC m=+974.528269927" watchObservedRunningTime="2025-11-22 09:19:38.392572664 +0000 UTC m=+974.535074954" Nov 22 09:19:38 crc kubenswrapper[4693]: I1122 09:19:38.723300 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:38 crc kubenswrapper[4693]: I1122 09:19:38.731025 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:19:39 crc kubenswrapper[4693]: I1122 09:19:39.385620 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.403602 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bed13960-e242-4593-a44d-da28a74d4b76","Type":"ContainerStarted","Data":"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1"} Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.408821 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10ad3952-c878-4643-ab1d-af0376d52101","Type":"ContainerStarted","Data":"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143"} Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.408945 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="10ad3952-c878-4643-ab1d-af0376d52101" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143" gracePeriod=30 Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.413824 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerStarted","Data":"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"} Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.418582 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.6653347699999999 podStartE2EDuration="5.418573849s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="2025-11-22 09:19:36.263372496 +0000 UTC m=+972.405874787" lastFinishedPulling="2025-11-22 09:19:40.016611574 +0000 UTC m=+976.159113866" observedRunningTime="2025-11-22 09:19:40.416393447 +0000 UTC m=+976.558895728" watchObservedRunningTime="2025-11-22 09:19:40.418573849 +0000 UTC m=+976.561076139" Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.419447 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerStarted","Data":"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"} Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.430057 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.7425158010000001 podStartE2EDuration="5.430041107s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="2025-11-22 09:19:36.329463568 +0000 UTC m=+972.471965859" lastFinishedPulling="2025-11-22 09:19:40.016988874 +0000 UTC m=+976.159491165" observedRunningTime="2025-11-22 09:19:40.42746966 +0000 UTC m=+976.569971951" watchObservedRunningTime="2025-11-22 09:19:40.430041107 +0000 UTC m=+976.572543398" Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.694518 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 09:19:40 crc kubenswrapper[4693]: I1122 09:19:40.727253 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.430897 4693 generic.go:334] "Generic (PLEG): container finished" podID="310ec6d0-8daa-42c9-ac3e-96f791743124" containerID="e2132bbd7701e65be6d6136f4683600c378e044e916742cd455bd97bef24f4d9" exitCode=0 Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.430957 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" event={"ID":"310ec6d0-8daa-42c9-ac3e-96f791743124","Type":"ContainerDied","Data":"e2132bbd7701e65be6d6136f4683600c378e044e916742cd455bd97bef24f4d9"} Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.435114 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerStarted","Data":"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"} Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.435210 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-log" containerID="cri-o://01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f" gracePeriod=30 Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.435426 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-metadata" containerID="cri-o://112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88" gracePeriod=30 Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.438412 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerStarted","Data":"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"} Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.485599 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.390798829 podStartE2EDuration="6.485585127s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="2025-11-22 09:19:35.926964052 +0000 UTC m=+972.069466343" lastFinishedPulling="2025-11-22 09:19:40.02175035 +0000 UTC m=+976.164252641" observedRunningTime="2025-11-22 09:19:41.482631403 +0000 UTC m=+977.625133693" watchObservedRunningTime="2025-11-22 
09:19:41.485585127 +0000 UTC m=+977.628087418" Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.486361 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.483874423 podStartE2EDuration="6.48635755s" podCreationTimestamp="2025-11-22 09:19:35 +0000 UTC" firstStartedPulling="2025-11-22 09:19:36.018346291 +0000 UTC m=+972.160848582" lastFinishedPulling="2025-11-22 09:19:40.020829417 +0000 UTC m=+976.163331709" observedRunningTime="2025-11-22 09:19:41.470494464 +0000 UTC m=+977.612996756" watchObservedRunningTime="2025-11-22 09:19:41.48635755 +0000 UTC m=+977.628859841" Nov 22 09:19:41 crc kubenswrapper[4693]: I1122 09:19:41.978932 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.073238 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs\") pod \"16b10e86-b376-49d8-825e-219b1e61e7ee\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.073315 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle\") pod \"16b10e86-b376-49d8-825e-219b1e61e7ee\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.073344 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7k49g\" (UniqueName: \"kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g\") pod \"16b10e86-b376-49d8-825e-219b1e61e7ee\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.073379 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data\") pod \"16b10e86-b376-49d8-825e-219b1e61e7ee\" (UID: \"16b10e86-b376-49d8-825e-219b1e61e7ee\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.073584 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs" (OuterVolumeSpecName: "logs") pod "16b10e86-b376-49d8-825e-219b1e61e7ee" (UID: "16b10e86-b376-49d8-825e-219b1e61e7ee"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.094160 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g" (OuterVolumeSpecName: "kube-api-access-7k49g") pod "16b10e86-b376-49d8-825e-219b1e61e7ee" (UID: "16b10e86-b376-49d8-825e-219b1e61e7ee"). InnerVolumeSpecName "kube-api-access-7k49g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.097655 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "16b10e86-b376-49d8-825e-219b1e61e7ee" (UID: "16b10e86-b376-49d8-825e-219b1e61e7ee"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.100061 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data" (OuterVolumeSpecName: "config-data") pod "16b10e86-b376-49d8-825e-219b1e61e7ee" (UID: "16b10e86-b376-49d8-825e-219b1e61e7ee"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.175395 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.175455 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7k49g\" (UniqueName: \"kubernetes.io/projected/16b10e86-b376-49d8-825e-219b1e61e7ee-kube-api-access-7k49g\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.175470 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/16b10e86-b376-49d8-825e-219b1e61e7ee-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.175481 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/16b10e86-b376-49d8-825e-219b1e61e7ee-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.446430 4693 generic.go:334] "Generic (PLEG): container finished" podID="7c78d850-6c37-4f3b-920c-d64e4c136e47" containerID="edc14c25cacc83c8e05fa203ad44366adb22cef814080ee11e22e32af667b51a" exitCode=0 Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.446507 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ljh2g" event={"ID":"7c78d850-6c37-4f3b-920c-d64e4c136e47","Type":"ContainerDied","Data":"edc14c25cacc83c8e05fa203ad44366adb22cef814080ee11e22e32af667b51a"} Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.449990 4693 generic.go:334] "Generic (PLEG): container finished" podID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerID="112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88" exitCode=0 Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450014 4693 generic.go:334] "Generic (PLEG): container finished" podID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerID="01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f" exitCode=143 Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450047 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450090 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerDied","Data":"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"} Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450131 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerDied","Data":"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"} Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450145 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"16b10e86-b376-49d8-825e-219b1e61e7ee","Type":"ContainerDied","Data":"715ab217a2c19ba491d658b41c576c70726825cd53fa8f19c2389995cdb6c4af"} Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.450163 4693 scope.go:117] "RemoveContainer" containerID="112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.503190 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.509710 4693 scope.go:117] "RemoveContainer" containerID="01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.520524 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.530139 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:42 crc kubenswrapper[4693]: E1122 09:19:42.530520 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-log" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.530533 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-log" Nov 22 09:19:42 crc kubenswrapper[4693]: E1122 09:19:42.530551 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-metadata" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.530558 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-metadata" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.530744 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-metadata" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.530760 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" containerName="nova-metadata-log" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.531661 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.536518 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.537118 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.547466 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.552077 4693 scope.go:117] "RemoveContainer" containerID="112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"
Nov 22 09:19:42 crc kubenswrapper[4693]: E1122 09:19:42.554212 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88\": container with ID starting with 112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88 not found: ID does not exist" containerID="112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.554243 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"} err="failed to get container status \"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88\": rpc error: code = NotFound desc = could not find container \"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88\": container with ID starting with 112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88 not found: ID does not exist"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.554280 4693 scope.go:117] "RemoveContainer" containerID="01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"
Nov 22 09:19:42 crc kubenswrapper[4693]: E1122 09:19:42.557234 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f\": container with ID starting with 01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f not found: ID does not exist" containerID="01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.557263 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"} err="failed to get container status \"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f\": rpc error: code = NotFound desc = could not find container \"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f\": container with ID starting with 01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f not found: ID does not exist"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.557293 4693 scope.go:117] "RemoveContainer" containerID="112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.557798 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88"} err="failed to get container status \"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88\": rpc error: code = NotFound desc = could not find container \"112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88\": container with ID starting with 112e77f0c8bd90058db75998ec16a5c1347bf94d47116c7e0fbc0cae50457c88 not found: ID does not exist"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.557818 4693 scope.go:117] "RemoveContainer" containerID="01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.558171 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f"} err="failed to get container status \"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f\": rpc error: code = NotFound desc = could not find container \"01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f\": container with ID starting with 01b5a0cec08ee8b4e03d57b8b7aa90592e509b0c5e9da3eec8e2d6243b66e53f not found: ID does not exist"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.584512 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7748x\" (UniqueName: \"kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.584590 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.584633 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.584663 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.584782 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.687448 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0"
Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.687579 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7748x\" (UniqueName: 
\"kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.687634 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.687687 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.687730 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.688577 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.694084 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.694225 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.697821 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.704946 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7748x\" (UniqueName: \"kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x\") pod \"nova-metadata-0\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.786925 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.857966 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.889021 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle\") pod \"310ec6d0-8daa-42c9-ac3e-96f791743124\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.889243 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts\") pod \"310ec6d0-8daa-42c9-ac3e-96f791743124\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.889310 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ds8v\" (UniqueName: \"kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v\") pod \"310ec6d0-8daa-42c9-ac3e-96f791743124\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.889344 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data\") pod \"310ec6d0-8daa-42c9-ac3e-96f791743124\" (UID: \"310ec6d0-8daa-42c9-ac3e-96f791743124\") " Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.893798 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v" (OuterVolumeSpecName: "kube-api-access-8ds8v") pod "310ec6d0-8daa-42c9-ac3e-96f791743124" (UID: "310ec6d0-8daa-42c9-ac3e-96f791743124"). InnerVolumeSpecName "kube-api-access-8ds8v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.896965 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts" (OuterVolumeSpecName: "scripts") pod "310ec6d0-8daa-42c9-ac3e-96f791743124" (UID: "310ec6d0-8daa-42c9-ac3e-96f791743124"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.913801 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data" (OuterVolumeSpecName: "config-data") pod "310ec6d0-8daa-42c9-ac3e-96f791743124" (UID: "310ec6d0-8daa-42c9-ac3e-96f791743124"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.914657 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "310ec6d0-8daa-42c9-ac3e-96f791743124" (UID: "310ec6d0-8daa-42c9-ac3e-96f791743124"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.991568 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.991606 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.991638 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ds8v\" (UniqueName: \"kubernetes.io/projected/310ec6d0-8daa-42c9-ac3e-96f791743124-kube-api-access-8ds8v\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:42 crc kubenswrapper[4693]: I1122 09:19:42.991651 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/310ec6d0-8daa-42c9-ac3e-96f791743124-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.255970 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.465590 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" event={"ID":"310ec6d0-8daa-42c9-ac3e-96f791743124","Type":"ContainerDied","Data":"706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629"} Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.465642 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="706d25f94622baf6b53e3943327d37af64d652eeb5d49b9f16a061f0fdccf629" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.465741 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-tg5lr" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.469503 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerStarted","Data":"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9"} Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.469576 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerStarted","Data":"4648726c00de37d891f4c719cef64475224327887388324a3a8159726ece2e3d"} Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.505997 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Nov 22 09:19:43 crc kubenswrapper[4693]: E1122 09:19:43.506371 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="310ec6d0-8daa-42c9-ac3e-96f791743124" containerName="nova-cell1-conductor-db-sync" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.506384 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="310ec6d0-8daa-42c9-ac3e-96f791743124" containerName="nova-cell1-conductor-db-sync" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.506585 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="310ec6d0-8daa-42c9-ac3e-96f791743124" containerName="nova-cell1-conductor-db-sync" Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.507164 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.508575 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.518904 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.604376 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.604819 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqcbr\" (UniqueName: \"kubernetes.io/projected/ca2d255f-4890-4fa9-85e7-47ab34607956-kube-api-access-xqcbr\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.604952 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.707242 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqcbr\" (UniqueName: \"kubernetes.io/projected/ca2d255f-4890-4fa9-85e7-47ab34607956-kube-api-access-xqcbr\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.707563 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.708017 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.712627 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.713748 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ca2d255f-4890-4fa9-85e7-47ab34607956-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.721023 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ljh2g"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.722941 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqcbr\" (UniqueName: \"kubernetes.io/projected/ca2d255f-4890-4fa9-85e7-47ab34607956-kube-api-access-xqcbr\") pod \"nova-cell1-conductor-0\" (UID: \"ca2d255f-4890-4fa9-85e7-47ab34607956\") " pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.809490 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tr5v\" (UniqueName: \"kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v\") pod \"7c78d850-6c37-4f3b-920c-d64e4c136e47\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") "
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.809697 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts\") pod \"7c78d850-6c37-4f3b-920c-d64e4c136e47\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") "
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.809805 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data\") pod \"7c78d850-6c37-4f3b-920c-d64e4c136e47\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") "
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.810171 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle\") pod \"7c78d850-6c37-4f3b-920c-d64e4c136e47\" (UID: \"7c78d850-6c37-4f3b-920c-d64e4c136e47\") "
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.816304 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v" (OuterVolumeSpecName: "kube-api-access-9tr5v") pod "7c78d850-6c37-4f3b-920c-d64e4c136e47" (UID: "7c78d850-6c37-4f3b-920c-d64e4c136e47"). InnerVolumeSpecName "kube-api-access-9tr5v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.817093 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tr5v\" (UniqueName: \"kubernetes.io/projected/7c78d850-6c37-4f3b-920c-d64e4c136e47-kube-api-access-9tr5v\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.817774 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts" (OuterVolumeSpecName: "scripts") pod "7c78d850-6c37-4f3b-920c-d64e4c136e47" (UID: "7c78d850-6c37-4f3b-920c-d64e4c136e47"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.837889 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data" (OuterVolumeSpecName: "config-data") pod "7c78d850-6c37-4f3b-920c-d64e4c136e47" (UID: "7c78d850-6c37-4f3b-920c-d64e4c136e47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.838253 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7c78d850-6c37-4f3b-920c-d64e4c136e47" (UID: "7c78d850-6c37-4f3b-920c-d64e4c136e47"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.844798 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.920201 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.920403 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:43 crc kubenswrapper[4693]: I1122 09:19:43.920416 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7c78d850-6c37-4f3b-920c-d64e4c136e47-scripts\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.160387 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="16b10e86-b376-49d8-825e-219b1e61e7ee" path="/var/lib/kubelet/pods/16b10e86-b376-49d8-825e-219b1e61e7ee/volumes"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.228689 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.479242 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-ljh2g" event={"ID":"7c78d850-6c37-4f3b-920c-d64e4c136e47","Type":"ContainerDied","Data":"b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab"}
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.479490 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0c0bb4dbf71a0473ab964c493d955ecd83519c9ae39ab33b143bc5ba3c25aab"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.479566 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-ljh2g"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.482487 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ca2d255f-4890-4fa9-85e7-47ab34607956","Type":"ContainerStarted","Data":"0d53595f9da84d2d134905155f537638ebdd3694c59c2be9806cf6ef6cd90941"}
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.482527 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ca2d255f-4890-4fa9-85e7-47ab34607956","Type":"ContainerStarted","Data":"557fd37656bd5c390d7ba56003c5304edd8f114c4699305c231a74dc1c967f70"}
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.482685 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.484146 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerStarted","Data":"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e"}
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.500333 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=1.500316941 podStartE2EDuration="1.500316941s" podCreationTimestamp="2025-11-22 09:19:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:44.499150498 +0000 UTC m=+980.641652790" watchObservedRunningTime="2025-11-22 09:19:44.500316941 +0000 UTC m=+980.642819233"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.516406 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.516387818 podStartE2EDuration="2.516387818s" podCreationTimestamp="2025-11-22 09:19:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:44.511656299 +0000 UTC m=+980.654158579" watchObservedRunningTime="2025-11-22 09:19:44.516387818 +0000 UTC m=+980.658890110"
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.640046 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.640254 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-log" containerID="cri-o://8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a" gracePeriod=30
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.640484 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-api" containerID="cri-o://b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce" gracePeriod=30
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.647284 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.647448 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="bed13960-e242-4593-a44d-da28a74d4b76" containerName="nova-scheduler-scheduler" containerID="cri-o://21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1" gracePeriod=30
Nov 22 09:19:44 crc kubenswrapper[4693]: I1122 09:19:44.752011 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.134113 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.253173 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle\") pod \"b3641f20-af8a-4a72-8a9a-11b372eef017\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") "
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.253230 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-94kkx\" (UniqueName: \"kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx\") pod \"b3641f20-af8a-4a72-8a9a-11b372eef017\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") "
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.253324 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs\") pod \"b3641f20-af8a-4a72-8a9a-11b372eef017\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") "
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.254591 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data\") pod \"b3641f20-af8a-4a72-8a9a-11b372eef017\" (UID: \"b3641f20-af8a-4a72-8a9a-11b372eef017\") "
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.255838 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs" (OuterVolumeSpecName: "logs") pod "b3641f20-af8a-4a72-8a9a-11b372eef017" (UID: "b3641f20-af8a-4a72-8a9a-11b372eef017"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.268522 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx" (OuterVolumeSpecName: "kube-api-access-94kkx") pod "b3641f20-af8a-4a72-8a9a-11b372eef017" (UID: "b3641f20-af8a-4a72-8a9a-11b372eef017"). InnerVolumeSpecName "kube-api-access-94kkx". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.279000 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3641f20-af8a-4a72-8a9a-11b372eef017" (UID: "b3641f20-af8a-4a72-8a9a-11b372eef017"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.292046 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data" (OuterVolumeSpecName: "config-data") pod "b3641f20-af8a-4a72-8a9a-11b372eef017" (UID: "b3641f20-af8a-4a72-8a9a-11b372eef017"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.357769 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.357835 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-94kkx\" (UniqueName: \"kubernetes.io/projected/b3641f20-af8a-4a72-8a9a-11b372eef017-kube-api-access-94kkx\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.357866 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b3641f20-af8a-4a72-8a9a-11b372eef017-logs\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.357876 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b3641f20-af8a-4a72-8a9a-11b372eef017-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.497682 4693 generic.go:334] "Generic (PLEG): container finished" podID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerID="b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce" exitCode=0
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.497998 4693 generic.go:334] "Generic (PLEG): container finished" podID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerID="8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a" exitCode=143
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.497799 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.497747 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerDied","Data":"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"}
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.498155 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerDied","Data":"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"}
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.498168 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b3641f20-af8a-4a72-8a9a-11b372eef017","Type":"ContainerDied","Data":"ccc7b4182aee7eebe25845a5ba6df55e357bb90a47612548030958eee9f111b8"}
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.498185 4693 scope.go:117] "RemoveContainer" containerID="b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.534906 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.543773 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.550430 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Nov 22 09:19:45 crc kubenswrapper[4693]: E1122 09:19:45.550923 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-log"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 
09:19:45.550956 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-log"
Nov 22 09:19:45 crc kubenswrapper[4693]: E1122 09:19:45.550968 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7c78d850-6c37-4f3b-920c-d64e4c136e47" containerName="nova-manage"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.550974 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7c78d850-6c37-4f3b-920c-d64e4c136e47" containerName="nova-manage"
Nov 22 09:19:45 crc kubenswrapper[4693]: E1122 09:19:45.551036 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-api"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.551043 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-api"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.551265 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7c78d850-6c37-4f3b-920c-d64e4c136e47" containerName="nova-manage"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.551298 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-api"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.551295 4693 scope.go:117] "RemoveContainer" containerID="8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.551311 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" containerName="nova-api-log"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.552650 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.558972 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.576678 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.584734 4693 scope.go:117] "RemoveContainer" containerID="b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"
Nov 22 09:19:45 crc kubenswrapper[4693]: E1122 09:19:45.585826 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce\": container with ID starting with b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce not found: ID does not exist" containerID="b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.585952 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"} err="failed to get container status \"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce\": rpc error: code = NotFound desc = could not find container \"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce\": container with ID starting with b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce not found: ID does not exist"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.585985 4693 scope.go:117] "RemoveContainer" containerID="8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"
Nov 22 09:19:45 crc kubenswrapper[4693]: E1122 09:19:45.586350 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a\": container with ID starting with 8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a not found: ID does not exist" containerID="8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.586381 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"} err="failed to get container status \"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a\": rpc error: code = NotFound desc = could not find container \"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a\": container with ID starting with 8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a not found: ID does not exist"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.586408 4693 scope.go:117] "RemoveContainer" containerID="b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.586633 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce"} err="failed to get container status \"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce\": rpc error: code = NotFound desc = could not find container \"b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce\": container with ID starting with b1063141ad7e50b8288319d23cc245985f691c4dc05fd7261c5bcee7ba6cbdce not found: ID does not exist"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.586655 4693 scope.go:117] "RemoveContainer" containerID="8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.586884 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a"} err="failed to get container status \"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a\": rpc error: code = NotFound desc = could not find container \"8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a\": container with ID starting with 8e497c37309e8b52bb2b35f79744630b602b0ffe089080ac232e90f2e6896f6a not found: ID does not exist"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.631744 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.665524 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.665642 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.665690 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.665784 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wwgqp\" (UniqueName: \"kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.689791 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"]
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.690105 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="dnsmasq-dns" containerID="cri-o://a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6" gracePeriod=10
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.767580 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wwgqp\" (UniqueName: \"kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0"
Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.767734 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.767811 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.767855 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.770293 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.772688 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.773167 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.783780 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wwgqp\" (UniqueName: \"kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp\") pod \"nova-api-0\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " pod="openstack/nova-api-0" Nov 22 09:19:45 crc kubenswrapper[4693]: I1122 09:19:45.880469 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.057394 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.157963 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3641f20-af8a-4a72-8a9a-11b372eef017" path="/var/lib/kubelet/pods/b3641f20-af8a-4a72-8a9a-11b372eef017/volumes" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175327 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175378 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175421 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9jm8l\" (UniqueName: \"kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175491 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175548 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.175782 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0\") pod \"8187f5e4-a62d-444b-99d8-694e067cb8f4\" (UID: \"8187f5e4-a62d-444b-99d8-694e067cb8f4\") " Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.180581 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l" (OuterVolumeSpecName: "kube-api-access-9jm8l") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "kube-api-access-9jm8l". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.217922 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.222808 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.224205 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.225080 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config" (OuterVolumeSpecName: "config") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.228967 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "8187f5e4-a62d-444b-99d8-694e067cb8f4" (UID: "8187f5e4-a62d-444b-99d8-694e067cb8f4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278325 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278355 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278365 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9jm8l\" (UniqueName: \"kubernetes.io/projected/8187f5e4-a62d-444b-99d8-694e067cb8f4-kube-api-access-9jm8l\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278376 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278384 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.278392 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/8187f5e4-a62d-444b-99d8-694e067cb8f4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.301374 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/nova-api-0"] Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.508245 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerStarted","Data":"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868"} Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.508298 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerStarted","Data":"7398e9abe24a1fcd96c10832b194711c35e0cf68d50eef486387132cd17433f5"} Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.509929 4693 generic.go:334] "Generic (PLEG): container finished" podID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerID="a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6" exitCode=0 Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.509986 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.510006 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" event={"ID":"8187f5e4-a62d-444b-99d8-694e067cb8f4","Type":"ContainerDied","Data":"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6"} Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.510049 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" event={"ID":"8187f5e4-a62d-444b-99d8-694e067cb8f4","Type":"ContainerDied","Data":"e68fcd8f44a258f87fd1f60db2c0e6b6d035175373502543a802dba50edc2849"} Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.510071 4693 scope.go:117] "RemoveContainer" containerID="a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.511863 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-log" containerID="cri-o://941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" gracePeriod=30 Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.511894 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-metadata" containerID="cri-o://19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" gracePeriod=30 Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.567101 4693 scope.go:117] "RemoveContainer" containerID="774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.567623 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"] Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.579117 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7965876c4f-d2bqt"] Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.589106 4693 scope.go:117] "RemoveContainer" containerID="a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6" Nov 22 09:19:46 crc kubenswrapper[4693]: E1122 09:19:46.590111 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6\": container with ID starting 
with a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6 not found: ID does not exist" containerID="a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.590146 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6"} err="failed to get container status \"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6\": rpc error: code = NotFound desc = could not find container \"a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6\": container with ID starting with a2ce88241abd31cc34e0a43d4a2a84b3f7ad8a4dddf30af02f0944e0506abad6 not found: ID does not exist" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.590169 4693 scope.go:117] "RemoveContainer" containerID="774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f" Nov 22 09:19:46 crc kubenswrapper[4693]: E1122 09:19:46.590529 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f\": container with ID starting with 774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f not found: ID does not exist" containerID="774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.590563 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f"} err="failed to get container status \"774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f\": rpc error: code = NotFound desc = could not find container \"774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f\": container with ID starting with 774339702da2ce08647105ce54ccc71347c2d91a70371e7ce0e06716c5a5618f not found: ID does not exist" Nov 22 09:19:46 crc kubenswrapper[4693]: I1122 09:19:46.954647 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.095175 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle\") pod \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.095333 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data\") pod \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.095369 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs\") pod \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.095407 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs\") pod \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.095434 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7748x\" (UniqueName: \"kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x\") pod \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\" (UID: \"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.099165 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs" (OuterVolumeSpecName: "logs") pod "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" (UID: "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.099384 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x" (OuterVolumeSpecName: "kube-api-access-7748x") pod "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" (UID: "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1"). InnerVolumeSpecName "kube-api-access-7748x". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.116436 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data" (OuterVolumeSpecName: "config-data") pod "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" (UID: "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.118404 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" (UID: "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.131319 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" (UID: "7869c8b2-ca55-40ed-a8c6-15e5ae3542f1"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.196309 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.196412 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.196480 4693 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.196618 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7748x\" (UniqueName: \"kubernetes.io/projected/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-kube-api-access-7748x\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.196671 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.457601 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.524702 4693 generic.go:334] "Generic (PLEG): container finished" podID="bed13960-e242-4593-a44d-da28a74d4b76" containerID="21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1" exitCode=0 Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.524816 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bed13960-e242-4593-a44d-da28a74d4b76","Type":"ContainerDied","Data":"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.524897 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"bed13960-e242-4593-a44d-da28a74d4b76","Type":"ContainerDied","Data":"1255038eb1df14d461b9ce86a27742a3bf94faee1095f8416ba47f6768684823"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.524920 4693 scope.go:117] "RemoveContainer" containerID="21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.525110 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.528881 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerStarted","Data":"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.531893 4693 generic.go:334] "Generic (PLEG): container finished" podID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerID="19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" exitCode=0 Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.531919 4693 generic.go:334] "Generic (PLEG): container finished" podID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerID="941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" exitCode=143 Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.531940 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerDied","Data":"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.531960 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerDied","Data":"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.531971 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"7869c8b2-ca55-40ed-a8c6-15e5ae3542f1","Type":"ContainerDied","Data":"4648726c00de37d891f4c719cef64475224327887388324a3a8159726ece2e3d"} Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.532011 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.553190 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.5531736130000002 podStartE2EDuration="2.553173613s" podCreationTimestamp="2025-11-22 09:19:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:47.545735903 +0000 UTC m=+983.688238194" watchObservedRunningTime="2025-11-22 09:19:47.553173613 +0000 UTC m=+983.695675904" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.558449 4693 scope.go:117] "RemoveContainer" containerID="21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.558907 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1\": container with ID starting with 21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1 not found: ID does not exist" containerID="21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.558947 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1"} err="failed to get container status \"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1\": rpc error: code = NotFound desc = could not find container \"21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1\": container with ID starting with 21d207653b8ccd0c8caa0e3fc0651160b6a69115dd77e9b139d80e2f517a9fa1 not found: ID does not exist" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.558972 4693 scope.go:117] "RemoveContainer" containerID="19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.567895 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.574877 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.576755 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.577145 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-log" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577159 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-log" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.577173 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="dnsmasq-dns" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577178 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="dnsmasq-dns" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.577192 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bed13960-e242-4593-a44d-da28a74d4b76" containerName="nova-scheduler-scheduler" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577198 4693 
state_mem.go:107] "Deleted CPUSet assignment" podUID="bed13960-e242-4593-a44d-da28a74d4b76" containerName="nova-scheduler-scheduler" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.577207 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-metadata" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577212 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-metadata" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.577225 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="init" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577230 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="init" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577383 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-log" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577527 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" containerName="nova-metadata-metadata" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577547 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="dnsmasq-dns" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.577554 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="bed13960-e242-4593-a44d-da28a74d4b76" containerName="nova-scheduler-scheduler" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.578713 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.580807 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.581206 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.586638 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.591158 4693 scope.go:117] "RemoveContainer" containerID="941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.606633 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fl52z\" (UniqueName: \"kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z\") pod \"bed13960-e242-4593-a44d-da28a74d4b76\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.606686 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle\") pod \"bed13960-e242-4593-a44d-da28a74d4b76\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.606724 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data\") pod \"bed13960-e242-4593-a44d-da28a74d4b76\" (UID: \"bed13960-e242-4593-a44d-da28a74d4b76\") " Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.612835 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z" (OuterVolumeSpecName: "kube-api-access-fl52z") pod "bed13960-e242-4593-a44d-da28a74d4b76" (UID: "bed13960-e242-4593-a44d-da28a74d4b76"). InnerVolumeSpecName "kube-api-access-fl52z". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.624689 4693 scope.go:117] "RemoveContainer" containerID="19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.625063 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e\": container with ID starting with 19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e not found: ID does not exist" containerID="19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625098 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e"} err="failed to get container status \"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e\": rpc error: code = NotFound desc = could not find container \"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e\": container with ID starting with 19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e not found: ID does not exist" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625125 4693 scope.go:117] "RemoveContainer" containerID="941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" Nov 22 09:19:47 crc kubenswrapper[4693]: E1122 09:19:47.625458 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9\": container with ID starting with 941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9 not found: ID does not exist" containerID="941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625486 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9"} err="failed to get container status \"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9\": rpc error: code = NotFound desc = could not find container \"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9\": container with ID starting with 941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9 not found: ID does not exist" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625505 4693 scope.go:117] "RemoveContainer" containerID="19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625792 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e"} err="failed to get container status \"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e\": rpc error: code = NotFound desc = could not find container \"19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e\": container with ID starting with 19b0dba9e323d3034b10673c37d568846f0d4ec652c13c9e387700d69241293e not found: ID does not exist" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.625816 4693 scope.go:117] "RemoveContainer" containerID="941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.626120 4693 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9"} err="failed to get container status \"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9\": rpc error: code = NotFound desc = could not find container \"941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9\": container with ID starting with 941094a11a45b29eef7a9b792bcf8d130aa03f3cdf76a5583774fa4b94b849a9 not found: ID does not exist" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.631042 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bed13960-e242-4593-a44d-da28a74d4b76" (UID: "bed13960-e242-4593-a44d-da28a74d4b76"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.633518 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data" (OuterVolumeSpecName: "config-data") pod "bed13960-e242-4593-a44d-da28a74d4b76" (UID: "bed13960-e242-4593-a44d-da28a74d4b76"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.708891 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zhn4q\" (UniqueName: \"kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.708958 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.708978 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.709019 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.709409 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.709607 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fl52z\" (UniqueName: 
\"kubernetes.io/projected/bed13960-e242-4593-a44d-da28a74d4b76-kube-api-access-fl52z\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.709630 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.709642 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bed13960-e242-4593-a44d-da28a74d4b76-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.815469 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.816962 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.817138 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.817433 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.817515 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zhn4q\" (UniqueName: \"kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.817515 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.819482 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.820952 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc 
kubenswrapper[4693]: I1122 09:19:47.821717 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.834738 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zhn4q\" (UniqueName: \"kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q\") pod \"nova-metadata-0\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " pod="openstack/nova-metadata-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.852372 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.861933 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.864738 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.865801 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.869903 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.871914 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:47 crc kubenswrapper[4693]: I1122 09:19:47.900200 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.026737 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.026806 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqjcm\" (UniqueName: \"kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.026899 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.129544 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.129630 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqjcm\" (UniqueName: 
\"kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.129728 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.134155 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.134749 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.146540 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqjcm\" (UniqueName: \"kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm\") pod \"nova-scheduler-0\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.158437 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7869c8b2-ca55-40ed-a8c6-15e5ae3542f1" path="/var/lib/kubelet/pods/7869c8b2-ca55-40ed-a8c6-15e5ae3542f1/volumes" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.159204 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" path="/var/lib/kubelet/pods/8187f5e4-a62d-444b-99d8-694e067cb8f4/volumes" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.159746 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bed13960-e242-4593-a44d-da28a74d4b76" path="/var/lib/kubelet/pods/bed13960-e242-4593-a44d-da28a74d4b76/volumes" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.179407 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.299816 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.546057 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerStarted","Data":"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5"} Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.546407 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerStarted","Data":"cc9049757cbdb0fa5e9f98786dd9868956a2636302b6b9a76d91092e51c110c6"} Nov 22 09:19:48 crc kubenswrapper[4693]: I1122 09:19:48.571655 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:19:48 crc kubenswrapper[4693]: W1122 09:19:48.574704 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda508ae94_3587_467b_91b7_8bbf728c1d4d.slice/crio-37a5d975819696e1d3bb69ef8a57df049de1c288de16e87a413ab533326361c8 WatchSource:0}: Error finding container 37a5d975819696e1d3bb69ef8a57df049de1c288de16e87a413ab533326361c8: Status 404 returned error can't find the container with id 37a5d975819696e1d3bb69ef8a57df049de1c288de16e87a413ab533326361c8 Nov 22 09:19:49 crc kubenswrapper[4693]: I1122 09:19:49.560194 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a508ae94-3587-467b-91b7-8bbf728c1d4d","Type":"ContainerStarted","Data":"3d04f1b0b2cf3fca708add37a127ec4e3801b1841f8351f9da79a58158dcc5d2"} Nov 22 09:19:49 crc kubenswrapper[4693]: I1122 09:19:49.560561 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a508ae94-3587-467b-91b7-8bbf728c1d4d","Type":"ContainerStarted","Data":"37a5d975819696e1d3bb69ef8a57df049de1c288de16e87a413ab533326361c8"} Nov 22 09:19:49 crc kubenswrapper[4693]: I1122 09:19:49.565947 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerStarted","Data":"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440"} Nov 22 09:19:49 crc kubenswrapper[4693]: I1122 09:19:49.582358 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.582344121 podStartE2EDuration="2.582344121s" podCreationTimestamp="2025-11-22 09:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:49.574914448 +0000 UTC m=+985.717416739" watchObservedRunningTime="2025-11-22 09:19:49.582344121 +0000 UTC m=+985.724846412" Nov 22 09:19:49 crc kubenswrapper[4693]: I1122 09:19:49.603022 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.602990183 podStartE2EDuration="2.602990183s" podCreationTimestamp="2025-11-22 09:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:19:49.601550095 +0000 UTC m=+985.744052386" watchObservedRunningTime="2025-11-22 09:19:49.602990183 +0000 UTC m=+985.745492474" Nov 22 09:19:51 crc 
kubenswrapper[4693]: I1122 09:19:51.036950 4693 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-7965876c4f-d2bqt" podUID="8187f5e4-a62d-444b-99d8-694e067cb8f4" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.161:5353: i/o timeout" Nov 22 09:19:51 crc kubenswrapper[4693]: I1122 09:19:51.484836 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 09:19:52 crc kubenswrapper[4693]: I1122 09:19:52.900716 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 09:19:52 crc kubenswrapper[4693]: I1122 09:19:52.901123 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 09:19:53 crc kubenswrapper[4693]: I1122 09:19:53.179990 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 09:19:53 crc kubenswrapper[4693]: I1122 09:19:53.869309 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Nov 22 09:19:54 crc kubenswrapper[4693]: I1122 09:19:54.928907 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:54 crc kubenswrapper[4693]: I1122 09:19:54.929128 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="5f7105e9-640f-4369-970a-df4123cce84a" containerName="kube-state-metrics" containerID="cri-o://783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546" gracePeriod=30 Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.340838 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.471987 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lt9zc\" (UniqueName: \"kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc\") pod \"5f7105e9-640f-4369-970a-df4123cce84a\" (UID: \"5f7105e9-640f-4369-970a-df4123cce84a\") " Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.480925 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc" (OuterVolumeSpecName: "kube-api-access-lt9zc") pod "5f7105e9-640f-4369-970a-df4123cce84a" (UID: "5f7105e9-640f-4369-970a-df4123cce84a"). InnerVolumeSpecName "kube-api-access-lt9zc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.574773 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lt9zc\" (UniqueName: \"kubernetes.io/projected/5f7105e9-640f-4369-970a-df4123cce84a-kube-api-access-lt9zc\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.637777 4693 generic.go:334] "Generic (PLEG): container finished" podID="5f7105e9-640f-4369-970a-df4123cce84a" containerID="783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546" exitCode=2 Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.637815 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f7105e9-640f-4369-970a-df4123cce84a","Type":"ContainerDied","Data":"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546"} Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.637862 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"5f7105e9-640f-4369-970a-df4123cce84a","Type":"ContainerDied","Data":"425088be5d5984b7711cc4a69d6c0766c320a61538029070e62bb826a2547dd7"} Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.637884 4693 scope.go:117] "RemoveContainer" containerID="783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.638001 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.666142 4693 scope.go:117] "RemoveContainer" containerID="783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546" Nov 22 09:19:55 crc kubenswrapper[4693]: E1122 09:19:55.666794 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546\": container with ID starting with 783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546 not found: ID does not exist" containerID="783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.666832 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546"} err="failed to get container status \"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546\": rpc error: code = NotFound desc = could not find container \"783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546\": container with ID starting with 783fdfeb5ac1c1d32303e254e07106675a607c01b3635110a147ed48aeba5546 not found: ID does not exist" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.680920 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.690827 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.701532 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:55 crc kubenswrapper[4693]: E1122 09:19:55.701998 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f7105e9-640f-4369-970a-df4123cce84a" containerName="kube-state-metrics" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.702019 4693 
state_mem.go:107] "Deleted CPUSet assignment" podUID="5f7105e9-640f-4369-970a-df4123cce84a" containerName="kube-state-metrics" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.702238 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f7105e9-640f-4369-970a-df4123cce84a" containerName="kube-state-metrics" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.702896 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.704654 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.708162 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.710394 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.778056 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.778098 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.778134 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.778264 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gk7b8\" (UniqueName: \"kubernetes.io/projected/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-api-access-gk7b8\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.880807 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.881159 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.880919 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.881473 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.882065 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gk7b8\" (UniqueName: \"kubernetes.io/projected/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-api-access-gk7b8\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.882351 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.884535 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.885297 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.897229 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:55 crc kubenswrapper[4693]: I1122 09:19:55.898374 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gk7b8\" (UniqueName: \"kubernetes.io/projected/ff38a410-4f38-40dc-afa6-2d2cca6054ca-kube-api-access-gk7b8\") pod \"kube-state-metrics-0\" (UID: \"ff38a410-4f38-40dc-afa6-2d2cca6054ca\") " pod="openstack/kube-state-metrics-0" Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.022234 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.166437 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f7105e9-640f-4369-970a-df4123cce84a" path="/var/lib/kubelet/pods/5f7105e9-640f-4369-970a-df4123cce84a/volumes" Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.421193 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.456313 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.456687 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-central-agent" containerID="cri-o://86aefc698cf1649569b36e22daea9185d26af25c491ff570c499c58c999a088e" gracePeriod=30 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.457119 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="proxy-httpd" containerID="cri-o://a8b67b5aa1bcf6f834857569b3519054893bd6bac1acd48be5af816e126c0dfa" gracePeriod=30 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.457153 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-notification-agent" containerID="cri-o://6c25848e779b80c20dec2e49feacb88fc030de2be00fa95d42ed95a8e9370f64" gracePeriod=30 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.457391 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="sg-core" containerID="cri-o://edf9f226646ba2ecdb8e63f567ef5a7e23dd6d22d7b3327bfb57bee50e5caf9a" gracePeriod=30 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.648964 4693 generic.go:334] "Generic (PLEG): container finished" podID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerID="a8b67b5aa1bcf6f834857569b3519054893bd6bac1acd48be5af816e126c0dfa" exitCode=0 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.649204 4693 generic.go:334] "Generic (PLEG): container finished" podID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerID="edf9f226646ba2ecdb8e63f567ef5a7e23dd6d22d7b3327bfb57bee50e5caf9a" exitCode=2 Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.649250 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerDied","Data":"a8b67b5aa1bcf6f834857569b3519054893bd6bac1acd48be5af816e126c0dfa"} Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.649269 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerDied","Data":"edf9f226646ba2ecdb8e63f567ef5a7e23dd6d22d7b3327bfb57bee50e5caf9a"} Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.650720 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ff38a410-4f38-40dc-afa6-2d2cca6054ca","Type":"ContainerStarted","Data":"eea39fee82520f023bedb7085752d3587320ae4a9cf333444c5063b3f6635b8e"} Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.964009 4693 prober.go:107] "Probe failed" probeType="Startup" 
pod="openstack/nova-api-0" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 09:19:56 crc kubenswrapper[4693]: I1122 09:19:56.964049 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.192:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.671122 4693 generic.go:334] "Generic (PLEG): container finished" podID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerID="6c25848e779b80c20dec2e49feacb88fc030de2be00fa95d42ed95a8e9370f64" exitCode=0 Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.671495 4693 generic.go:334] "Generic (PLEG): container finished" podID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerID="86aefc698cf1649569b36e22daea9185d26af25c491ff570c499c58c999a088e" exitCode=0 Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.671311 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerDied","Data":"6c25848e779b80c20dec2e49feacb88fc030de2be00fa95d42ed95a8e9370f64"} Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.671560 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerDied","Data":"86aefc698cf1649569b36e22daea9185d26af25c491ff570c499c58c999a088e"} Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.677526 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ff38a410-4f38-40dc-afa6-2d2cca6054ca","Type":"ContainerStarted","Data":"965d52f7996571c86272a0c5a96d8efa22f8581df5166411be1fefc112512e67"} Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.678890 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.867775 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.890692 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.568896608 podStartE2EDuration="2.890674236s" podCreationTimestamp="2025-11-22 09:19:55 +0000 UTC" firstStartedPulling="2025-11-22 09:19:56.429235064 +0000 UTC m=+992.571737356" lastFinishedPulling="2025-11-22 09:19:56.751012693 +0000 UTC m=+992.893514984" observedRunningTime="2025-11-22 09:19:57.699088785 +0000 UTC m=+993.841591097" watchObservedRunningTime="2025-11-22 09:19:57.890674236 +0000 UTC m=+994.033176527" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.900390 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.900526 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.926972 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927075 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927162 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnnq8\" (UniqueName: \"kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927233 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927481 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927531 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927653 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle\") pod \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\" (UID: \"4631c5a1-a696-4b97-bcd8-c2f5330b31f0\") " Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.927779 4693 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.929164 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.930308 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.936382 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8" (OuterVolumeSpecName: "kube-api-access-mnnq8") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "kube-api-access-mnnq8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.936533 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts" (OuterVolumeSpecName: "scripts") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:57 crc kubenswrapper[4693]: I1122 09:19:57.962663 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.000110 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.023971 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data" (OuterVolumeSpecName: "config-data") pod "4631c5a1-a696-4b97-bcd8-c2f5330b31f0" (UID: "4631c5a1-a696-4b97-bcd8-c2f5330b31f0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.031389 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.031796 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.031897 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.031959 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.032013 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnnq8\" (UniqueName: \"kubernetes.io/projected/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-kube-api-access-mnnq8\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.032063 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4631c5a1-a696-4b97-bcd8-c2f5330b31f0-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.180590 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.215978 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.693363 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"4631c5a1-a696-4b97-bcd8-c2f5330b31f0","Type":"ContainerDied","Data":"42a15390e513941c0ff7977f988b7325a99da37d781121f3e607dbf17924ba34"} Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.693484 4693 scope.go:117] "RemoveContainer" containerID="a8b67b5aa1bcf6f834857569b3519054893bd6bac1acd48be5af816e126c0dfa" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.693755 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.724583 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.731242 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.733433 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741027 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:58 crc kubenswrapper[4693]: E1122 09:19:58.741646 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="sg-core" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741659 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="sg-core" Nov 22 09:19:58 crc kubenswrapper[4693]: E1122 09:19:58.741676 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="proxy-httpd" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741682 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="proxy-httpd" Nov 22 09:19:58 crc kubenswrapper[4693]: E1122 09:19:58.741701 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-notification-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741708 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-notification-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: E1122 09:19:58.741748 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-central-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741756 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-central-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741969 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="proxy-httpd" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741980 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-central-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.741990 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="sg-core" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.742007 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" containerName="ceilometer-notification-agent" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.744468 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.745986 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.749567 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.749603 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.749755 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.782880 4693 scope.go:117] "RemoveContainer" containerID="edf9f226646ba2ecdb8e63f567ef5a7e23dd6d22d7b3327bfb57bee50e5caf9a" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.810175 4693 scope.go:117] "RemoveContainer" containerID="6c25848e779b80c20dec2e49feacb88fc030de2be00fa95d42ed95a8e9370f64" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.825785 4693 scope.go:117] "RemoveContainer" containerID="86aefc698cf1649569b36e22daea9185d26af25c491ff570c499c58c999a088e" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846451 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846554 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846634 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfw6j\" (UniqueName: \"kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846692 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846775 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846872 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846895 
4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.846980 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.916981 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.917051 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.193:8775/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950221 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950326 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950425 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950487 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950542 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950609 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfw6j\" (UniqueName: \"kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " 
pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950649 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950704 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.950890 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.951202 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.955636 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.956331 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.956647 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.964690 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.966501 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfw6j\" (UniqueName: \"kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:58 crc kubenswrapper[4693]: I1122 09:19:58.975473 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts\") pod \"ceilometer-0\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " pod="openstack/ceilometer-0" Nov 22 09:19:59 crc 
kubenswrapper[4693]: I1122 09:19:59.075032 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:19:59 crc kubenswrapper[4693]: I1122 09:19:59.513659 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:19:59 crc kubenswrapper[4693]: I1122 09:19:59.702795 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerStarted","Data":"60fa9dcdfbd41a85a8a3fc5def2a356f2dbf563a74c63a586f617f848b7046f1"} Nov 22 09:20:00 crc kubenswrapper[4693]: I1122 09:20:00.173967 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4631c5a1-a696-4b97-bcd8-c2f5330b31f0" path="/var/lib/kubelet/pods/4631c5a1-a696-4b97-bcd8-c2f5330b31f0/volumes" Nov 22 09:20:00 crc kubenswrapper[4693]: I1122 09:20:00.710449 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerStarted","Data":"fbe9f94431ab78a52f2b6a812910ec0d64dd0d47097de627428699edfd49b70b"} Nov 22 09:20:01 crc kubenswrapper[4693]: I1122 09:20:01.729330 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerStarted","Data":"c675eea055fe4db95e77e3ecb6a919ba82a7f4d09203b04c5f31fbb5a0673ab9"} Nov 22 09:20:02 crc kubenswrapper[4693]: I1122 09:20:02.737174 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerStarted","Data":"471c170d952e8e8a7bfb755ddd1fd522ec5f22e22398fb8fe78da57465aa3325"} Nov 22 09:20:03 crc kubenswrapper[4693]: I1122 09:20:03.745440 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerStarted","Data":"8c46b6a309214bac7baac4768b7d0027f5fe6ee991d47271b54fc3ed7b69b6a7"} Nov 22 09:20:03 crc kubenswrapper[4693]: I1122 09:20:03.745691 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 09:20:03 crc kubenswrapper[4693]: I1122 09:20:03.762449 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.8713660540000001 podStartE2EDuration="5.76243934s" podCreationTimestamp="2025-11-22 09:19:58 +0000 UTC" firstStartedPulling="2025-11-22 09:19:59.519862024 +0000 UTC m=+995.662364315" lastFinishedPulling="2025-11-22 09:20:03.41093531 +0000 UTC m=+999.553437601" observedRunningTime="2025-11-22 09:20:03.758246344 +0000 UTC m=+999.900748635" watchObservedRunningTime="2025-11-22 09:20:03.76243934 +0000 UTC m=+999.904941631" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.885889 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.886202 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.886595 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.886611 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.888190 4693 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 09:20:05 crc kubenswrapper[4693]: I1122 09:20:05.888521 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.029110 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.030563 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.043754 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.067827 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173717 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173769 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173794 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173869 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2v7jf\" (UniqueName: \"kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173910 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.173960 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.275891 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2v7jf\" (UniqueName: 
\"kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.275953 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.275996 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.276044 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.276075 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.276095 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.276933 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.277243 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.277273 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.277388 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0\") pod \"dnsmasq-dns-55bfb77665-4v4px\" 
(UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.277691 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.298705 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2v7jf\" (UniqueName: \"kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf\") pod \"dnsmasq-dns-55bfb77665-4v4px\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.355632 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:06 crc kubenswrapper[4693]: I1122 09:20:06.883477 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.773210 4693 generic.go:334] "Generic (PLEG): container finished" podID="c01ee7ab-a925-456b-bd0f-124846156df3" containerID="bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727" exitCode=0 Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.773316 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" event={"ID":"c01ee7ab-a925-456b-bd0f-124846156df3","Type":"ContainerDied","Data":"bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727"} Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.774075 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" event={"ID":"c01ee7ab-a925-456b-bd0f-124846156df3","Type":"ContainerStarted","Data":"d6e8b325f8ec890b156b4bf5d6a07c4e0cf083bc6a23a9061b9c132bb4d34ab6"} Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.902320 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.902564 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-central-agent" containerID="cri-o://fbe9f94431ab78a52f2b6a812910ec0d64dd0d47097de627428699edfd49b70b" gracePeriod=30 Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.902970 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="sg-core" containerID="cri-o://471c170d952e8e8a7bfb755ddd1fd522ec5f22e22398fb8fe78da57465aa3325" gracePeriod=30 Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.902999 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="proxy-httpd" containerID="cri-o://8c46b6a309214bac7baac4768b7d0027f5fe6ee991d47271b54fc3ed7b69b6a7" gracePeriod=30 Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.903048 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-notification-agent" 
containerID="cri-o://c675eea055fe4db95e77e3ecb6a919ba82a7f4d09203b04c5f31fbb5a0673ab9" gracePeriod=30 Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.911788 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.912268 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 09:20:07 crc kubenswrapper[4693]: I1122 09:20:07.928986 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.235269 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.782363 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" event={"ID":"c01ee7ab-a925-456b-bd0f-124846156df3","Type":"ContainerStarted","Data":"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.782524 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785015 4693 generic.go:334] "Generic (PLEG): container finished" podID="6293eff4-712f-464b-aa3a-c24e83313385" containerID="8c46b6a309214bac7baac4768b7d0027f5fe6ee991d47271b54fc3ed7b69b6a7" exitCode=0 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785053 4693 generic.go:334] "Generic (PLEG): container finished" podID="6293eff4-712f-464b-aa3a-c24e83313385" containerID="471c170d952e8e8a7bfb755ddd1fd522ec5f22e22398fb8fe78da57465aa3325" exitCode=2 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785063 4693 generic.go:334] "Generic (PLEG): container finished" podID="6293eff4-712f-464b-aa3a-c24e83313385" containerID="c675eea055fe4db95e77e3ecb6a919ba82a7f4d09203b04c5f31fbb5a0673ab9" exitCode=0 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785073 4693 generic.go:334] "Generic (PLEG): container finished" podID="6293eff4-712f-464b-aa3a-c24e83313385" containerID="fbe9f94431ab78a52f2b6a812910ec0d64dd0d47097de627428699edfd49b70b" exitCode=0 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785100 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerDied","Data":"8c46b6a309214bac7baac4768b7d0027f5fe6ee991d47271b54fc3ed7b69b6a7"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785158 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerDied","Data":"471c170d952e8e8a7bfb755ddd1fd522ec5f22e22398fb8fe78da57465aa3325"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785170 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerDied","Data":"c675eea055fe4db95e77e3ecb6a919ba82a7f4d09203b04c5f31fbb5a0673ab9"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785179 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerDied","Data":"fbe9f94431ab78a52f2b6a812910ec0d64dd0d47097de627428699edfd49b70b"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785188 4693 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"6293eff4-712f-464b-aa3a-c24e83313385","Type":"ContainerDied","Data":"60fa9dcdfbd41a85a8a3fc5def2a356f2dbf563a74c63a586f617f848b7046f1"} Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785197 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="60fa9dcdfbd41a85a8a3fc5def2a356f2dbf563a74c63a586f617f848b7046f1" Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785323 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-log" containerID="cri-o://043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868" gracePeriod=30 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.785342 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-api" containerID="cri-o://84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3" gracePeriod=30 Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.794874 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.858547 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" podStartSLOduration=2.858523459 podStartE2EDuration="2.858523459s" podCreationTimestamp="2025-11-22 09:20:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:08.804948454 +0000 UTC m=+1004.947450744" watchObservedRunningTime="2025-11-22 09:20:08.858523459 +0000 UTC m=+1005.001025749" Nov 22 09:20:08 crc kubenswrapper[4693]: I1122 09:20:08.866460 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045001 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045067 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045178 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045259 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045319 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045479 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045531 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sfw6j\" (UniqueName: \"kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.045581 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts\") pod \"6293eff4-712f-464b-aa3a-c24e83313385\" (UID: \"6293eff4-712f-464b-aa3a-c24e83313385\") " Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.047123 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.047375 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.051227 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts" (OuterVolumeSpecName: "scripts") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.051337 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j" (OuterVolumeSpecName: "kube-api-access-sfw6j") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "kube-api-access-sfw6j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.069030 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.086007 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.108259 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.113753 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data" (OuterVolumeSpecName: "config-data") pod "6293eff4-712f-464b-aa3a-c24e83313385" (UID: "6293eff4-712f-464b-aa3a-c24e83313385"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147723 4693 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147751 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sfw6j\" (UniqueName: \"kubernetes.io/projected/6293eff4-712f-464b-aa3a-c24e83313385-kube-api-access-sfw6j\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147763 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147773 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147783 4693 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147791 4693 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-log-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147798 4693 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/6293eff4-712f-464b-aa3a-c24e83313385-run-httpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.147806 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6293eff4-712f-464b-aa3a-c24e83313385-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.794705 4693 generic.go:334] "Generic (PLEG): container finished" podID="effdb574-9196-4c67-92d6-d8101efd3780" containerID="043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868" exitCode=143 Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.795414 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerDied","Data":"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868"} Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.795471 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.820412 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.825453 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.837917 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:09 crc kubenswrapper[4693]: E1122 09:20:09.838458 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-notification-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838483 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-notification-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: E1122 09:20:09.838495 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="proxy-httpd" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838502 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="proxy-httpd" Nov 22 09:20:09 crc kubenswrapper[4693]: E1122 09:20:09.838513 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-central-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838519 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-central-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: E1122 09:20:09.838544 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="sg-core" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838550 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="sg-core" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838734 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="sg-core" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838744 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="proxy-httpd" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838757 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-notification-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.838772 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6293eff4-712f-464b-aa3a-c24e83313385" containerName="ceilometer-central-agent" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.841339 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.842799 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.842966 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.845761 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.845973 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.961988 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-log-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962032 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-config-data\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962089 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962126 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pl69z\" (UniqueName: \"kubernetes.io/projected/ee195f08-f515-47ee-bdc0-34d1396136be-kube-api-access-pl69z\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962157 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-scripts\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962184 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962272 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:09 crc kubenswrapper[4693]: I1122 09:20:09.962323 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-run-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.063904 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064204 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pl69z\" (UniqueName: \"kubernetes.io/projected/ee195f08-f515-47ee-bdc0-34d1396136be-kube-api-access-pl69z\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064236 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-scripts\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064267 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064342 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064382 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-run-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064401 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-log-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064421 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-config-data\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.064963 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-log-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.065004 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/ee195f08-f515-47ee-bdc0-34d1396136be-run-httpd\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.069921 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.070158 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-config-data\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.070308 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.070385 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.079029 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ee195f08-f515-47ee-bdc0-34d1396136be-scripts\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.079039 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pl69z\" (UniqueName: \"kubernetes.io/projected/ee195f08-f515-47ee-bdc0-34d1396136be-kube-api-access-pl69z\") pod \"ceilometer-0\" (UID: \"ee195f08-f515-47ee-bdc0-34d1396136be\") " pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.154638 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.156752 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6293eff4-712f-464b-aa3a-c24e83313385" path="/var/lib/kubelet/pods/6293eff4-712f-464b-aa3a-c24e83313385/volumes" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.527681 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Nov 22 09:20:10 crc kubenswrapper[4693]: W1122 09:20:10.550230 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podee195f08_f515_47ee_bdc0_34d1396136be.slice/crio-5b98d50c2da5a8a79064d06101106f0647eeee523c0005a2af337a0b863e0813 WatchSource:0}: Error finding container 5b98d50c2da5a8a79064d06101106f0647eeee523c0005a2af337a0b863e0813: Status 404 returned error can't find the container with id 5b98d50c2da5a8a79064d06101106f0647eeee523c0005a2af337a0b863e0813 Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.723429 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.805302 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee195f08-f515-47ee-bdc0-34d1396136be","Type":"ContainerStarted","Data":"5b98d50c2da5a8a79064d06101106f0647eeee523c0005a2af337a0b863e0813"} Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.807123 4693 generic.go:334] "Generic (PLEG): container finished" podID="10ad3952-c878-4643-ab1d-af0376d52101" containerID="479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143" exitCode=137 Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.807184 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.807229 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10ad3952-c878-4643-ab1d-af0376d52101","Type":"ContainerDied","Data":"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143"} Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.807299 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"10ad3952-c878-4643-ab1d-af0376d52101","Type":"ContainerDied","Data":"b3b61fe5acd4a72bde2b732272d3a93918055e6f2ab308062ede3f77880bf598"} Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.807334 4693 scope.go:117] "RemoveContainer" containerID="479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.827072 4693 scope.go:117] "RemoveContainer" containerID="479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143" Nov 22 09:20:10 crc kubenswrapper[4693]: E1122 09:20:10.827444 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143\": container with ID starting with 479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143 not found: ID does not exist" containerID="479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.827479 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143"} err="failed to get container status \"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143\": rpc error: code = NotFound desc = could not find container \"479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143\": container with ID starting with 479917dd6dffb14515acf553799cc068dd027b287840f928ffcca793b0101143 not found: ID does not exist" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.879322 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle\") pod \"10ad3952-c878-4643-ab1d-af0376d52101\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.879827 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lxwzk\" (UniqueName: \"kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk\") pod \"10ad3952-c878-4643-ab1d-af0376d52101\" (UID: 
\"10ad3952-c878-4643-ab1d-af0376d52101\") " Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.879869 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data\") pod \"10ad3952-c878-4643-ab1d-af0376d52101\" (UID: \"10ad3952-c878-4643-ab1d-af0376d52101\") " Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.885088 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk" (OuterVolumeSpecName: "kube-api-access-lxwzk") pod "10ad3952-c878-4643-ab1d-af0376d52101" (UID: "10ad3952-c878-4643-ab1d-af0376d52101"). InnerVolumeSpecName "kube-api-access-lxwzk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.902957 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10ad3952-c878-4643-ab1d-af0376d52101" (UID: "10ad3952-c878-4643-ab1d-af0376d52101"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.906577 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data" (OuterVolumeSpecName: "config-data") pod "10ad3952-c878-4643-ab1d-af0376d52101" (UID: "10ad3952-c878-4643-ab1d-af0376d52101"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.982045 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lxwzk\" (UniqueName: \"kubernetes.io/projected/10ad3952-c878-4643-ab1d-af0376d52101-kube-api-access-lxwzk\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.982073 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:10 crc kubenswrapper[4693]: I1122 09:20:10.982083 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10ad3952-c878-4643-ab1d-af0376d52101-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.131591 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.139912 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.149768 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:20:11 crc kubenswrapper[4693]: E1122 09:20:11.150126 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10ad3952-c878-4643-ab1d-af0376d52101" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.150139 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="10ad3952-c878-4643-ab1d-af0376d52101" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.150338 4693 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="10ad3952-c878-4643-ab1d-af0376d52101" containerName="nova-cell1-novncproxy-novncproxy" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.150962 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.152235 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.152651 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.152821 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.166492 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.286908 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.286986 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.287039 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c29d\" (UniqueName: \"kubernetes.io/projected/7135aae5-46b3-4654-aba4-70a6f2df0d1e-kube-api-access-2c29d\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.287069 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.287250 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.388525 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c29d\" (UniqueName: \"kubernetes.io/projected/7135aae5-46b3-4654-aba4-70a6f2df0d1e-kube-api-access-2c29d\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.388897 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.388988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.389041 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.389084 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.392275 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.392817 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.393225 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.393538 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7135aae5-46b3-4654-aba4-70a6f2df0d1e-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.403322 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c29d\" (UniqueName: \"kubernetes.io/projected/7135aae5-46b3-4654-aba4-70a6f2df0d1e-kube-api-access-2c29d\") pod \"nova-cell1-novncproxy-0\" (UID: \"7135aae5-46b3-4654-aba4-70a6f2df0d1e\") " pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.477321 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.815036 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee195f08-f515-47ee-bdc0-34d1396136be","Type":"ContainerStarted","Data":"0427506458e2f84b8cadb0c4e1017785669b62d49b4ba6c190d74210d1b153fa"} Nov 22 09:20:11 crc kubenswrapper[4693]: W1122 09:20:11.868572 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7135aae5_46b3_4654_aba4_70a6f2df0d1e.slice/crio-acfb4fb7b13b9dd99a01e65c1b1669149cdc10604571d4505ff2a6b37c90e7a2 WatchSource:0}: Error finding container acfb4fb7b13b9dd99a01e65c1b1669149cdc10604571d4505ff2a6b37c90e7a2: Status 404 returned error can't find the container with id acfb4fb7b13b9dd99a01e65c1b1669149cdc10604571d4505ff2a6b37c90e7a2 Nov 22 09:20:11 crc kubenswrapper[4693]: I1122 09:20:11.875458 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.159493 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10ad3952-c878-4643-ab1d-af0376d52101" path="/var/lib/kubelet/pods/10ad3952-c878-4643-ab1d-af0376d52101/volumes" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.235794 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421068 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs\") pod \"effdb574-9196-4c67-92d6-d8101efd3780\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421196 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wwgqp\" (UniqueName: \"kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp\") pod \"effdb574-9196-4c67-92d6-d8101efd3780\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421261 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle\") pod \"effdb574-9196-4c67-92d6-d8101efd3780\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421323 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data\") pod \"effdb574-9196-4c67-92d6-d8101efd3780\" (UID: \"effdb574-9196-4c67-92d6-d8101efd3780\") " Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421537 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs" (OuterVolumeSpecName: "logs") pod "effdb574-9196-4c67-92d6-d8101efd3780" (UID: "effdb574-9196-4c67-92d6-d8101efd3780"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.421867 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/effdb574-9196-4c67-92d6-d8101efd3780-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.426132 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp" (OuterVolumeSpecName: "kube-api-access-wwgqp") pod "effdb574-9196-4c67-92d6-d8101efd3780" (UID: "effdb574-9196-4c67-92d6-d8101efd3780"). InnerVolumeSpecName "kube-api-access-wwgqp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.447477 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data" (OuterVolumeSpecName: "config-data") pod "effdb574-9196-4c67-92d6-d8101efd3780" (UID: "effdb574-9196-4c67-92d6-d8101efd3780"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.450029 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "effdb574-9196-4c67-92d6-d8101efd3780" (UID: "effdb574-9196-4c67-92d6-d8101efd3780"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.523672 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.523695 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/effdb574-9196-4c67-92d6-d8101efd3780-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.523707 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wwgqp\" (UniqueName: \"kubernetes.io/projected/effdb574-9196-4c67-92d6-d8101efd3780-kube-api-access-wwgqp\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.828255 4693 generic.go:334] "Generic (PLEG): container finished" podID="effdb574-9196-4c67-92d6-d8101efd3780" containerID="84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3" exitCode=0 Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.828330 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.828366 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerDied","Data":"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3"} Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.828413 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"effdb574-9196-4c67-92d6-d8101efd3780","Type":"ContainerDied","Data":"7398e9abe24a1fcd96c10832b194711c35e0cf68d50eef486387132cd17433f5"} Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.828440 4693 scope.go:117] "RemoveContainer" containerID="84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.829666 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7135aae5-46b3-4654-aba4-70a6f2df0d1e","Type":"ContainerStarted","Data":"8ebbbdb6fbc01c2a9a24899829bd0296dc73c1f3e9c3ea955c7b76d203a60cc9"} Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.829694 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"7135aae5-46b3-4654-aba4-70a6f2df0d1e","Type":"ContainerStarted","Data":"acfb4fb7b13b9dd99a01e65c1b1669149cdc10604571d4505ff2a6b37c90e7a2"} Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.833755 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee195f08-f515-47ee-bdc0-34d1396136be","Type":"ContainerStarted","Data":"e6a45cd796832482a9957b68c47084ed0f514787e6c24cdcff930ddf0cad3b0f"} Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.844737 4693 scope.go:117] "RemoveContainer" containerID="043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.854015 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=1.854001132 podStartE2EDuration="1.854001132s" podCreationTimestamp="2025-11-22 09:20:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:12.853144621 +0000 UTC m=+1008.995646912" watchObservedRunningTime="2025-11-22 09:20:12.854001132 +0000 UTC m=+1008.996503424" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.864663 4693 scope.go:117] "RemoveContainer" containerID="84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3" Nov 22 09:20:12 crc kubenswrapper[4693]: E1122 09:20:12.866737 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3\": container with ID starting with 84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3 not found: ID does not exist" containerID="84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.866775 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3"} err="failed to get container status \"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3\": rpc error: code = NotFound desc = could not find container 
\"84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3\": container with ID starting with 84e7b0df4d39e0025b8e135038f29b1f8267b285064109f4bf299b05ebefd6b3 not found: ID does not exist" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.866806 4693 scope.go:117] "RemoveContainer" containerID="043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868" Nov 22 09:20:12 crc kubenswrapper[4693]: E1122 09:20:12.867951 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868\": container with ID starting with 043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868 not found: ID does not exist" containerID="043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.867975 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868"} err="failed to get container status \"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868\": rpc error: code = NotFound desc = could not find container \"043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868\": container with ID starting with 043993c926fe8487f6f4515457b834ce09decead9fc4077d7e71e9ac65a09868 not found: ID does not exist" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.877310 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.883732 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.888680 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:12 crc kubenswrapper[4693]: E1122 09:20:12.889024 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-api" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.889037 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-api" Nov 22 09:20:12 crc kubenswrapper[4693]: E1122 09:20:12.889052 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-log" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.889057 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-log" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.889235 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-log" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.889258 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="effdb574-9196-4c67-92d6-d8101efd3780" containerName="nova-api-api" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.890210 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.891787 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.892050 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.892171 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 09:20:12 crc kubenswrapper[4693]: I1122 09:20:12.909278 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037547 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037594 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037642 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037669 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037685 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.037724 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cxcxd\" (UniqueName: \"kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140119 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140166 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140243 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140271 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140762 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.140807 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cxcxd\" (UniqueName: \"kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.141186 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.143820 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.145214 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.157531 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.157570 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.159685 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cxcxd\" (UniqueName: \"kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd\") pod \"nova-api-0\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " pod="openstack/nova-api-0" 
Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.206961 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.601060 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.844645 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee195f08-f515-47ee-bdc0-34d1396136be","Type":"ContainerStarted","Data":"2f737c6010ca2bedd076801564d4745168ebb00aba0d17fa8b3dfe60ed68adfe"} Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.848208 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerStarted","Data":"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9"} Nov 22 09:20:13 crc kubenswrapper[4693]: I1122 09:20:13.848243 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerStarted","Data":"ee935354ecbed267204998dce016915f55d180e3a6fabb1b22aa79b4fd58ea02"} Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.155219 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="effdb574-9196-4c67-92d6-d8101efd3780" path="/var/lib/kubelet/pods/effdb574-9196-4c67-92d6-d8101efd3780/volumes" Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.856637 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerStarted","Data":"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac"} Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.860900 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"ee195f08-f515-47ee-bdc0-34d1396136be","Type":"ContainerStarted","Data":"399ddea058b9fc5bf85dca745ab140adf3b58a7299f41d4510f6a9e27654d3ab"} Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.861395 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.875743 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.875733181 podStartE2EDuration="2.875733181s" podCreationTimestamp="2025-11-22 09:20:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:14.8740688 +0000 UTC m=+1011.016571091" watchObservedRunningTime="2025-11-22 09:20:14.875733181 +0000 UTC m=+1011.018235472" Nov 22 09:20:14 crc kubenswrapper[4693]: I1122 09:20:14.900814 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.568315864 podStartE2EDuration="5.900779428s" podCreationTimestamp="2025-11-22 09:20:09 +0000 UTC" firstStartedPulling="2025-11-22 09:20:10.55482091 +0000 UTC m=+1006.697323201" lastFinishedPulling="2025-11-22 09:20:13.887284475 +0000 UTC m=+1010.029786765" observedRunningTime="2025-11-22 09:20:14.890192196 +0000 UTC m=+1011.032694487" watchObservedRunningTime="2025-11-22 09:20:14.900779428 +0000 UTC m=+1011.043281719" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.356956 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" 
Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.417709 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.417936 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="dnsmasq-dns" containerID="cri-o://a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc" gracePeriod=10 Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.477482 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.813722 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821391 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821444 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821474 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821517 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjvqt\" (UniqueName: \"kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821563 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.821593 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb\") pod \"cec12aaf-a087-48db-b983-9d75767c38d1\" (UID: \"cec12aaf-a087-48db-b983-9d75767c38d1\") " Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.837039 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt" (OuterVolumeSpecName: "kube-api-access-rjvqt") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "kube-api-access-rjvqt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.877831 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.881940 4693 generic.go:334] "Generic (PLEG): container finished" podID="cec12aaf-a087-48db-b983-9d75767c38d1" containerID="a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc" exitCode=0 Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.884133 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.885158 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" event={"ID":"cec12aaf-a087-48db-b983-9d75767c38d1","Type":"ContainerDied","Data":"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc"} Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.885219 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-64dbf5859c-znqsl" event={"ID":"cec12aaf-a087-48db-b983-9d75767c38d1","Type":"ContainerDied","Data":"c32389d2f5c246cac321e61f4ab66390989305667100a4ae26f2497267890e88"} Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.885244 4693 scope.go:117] "RemoveContainer" containerID="a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.902338 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.903202 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.908938 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config" (OuterVolumeSpecName: "config") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.909497 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "cec12aaf-a087-48db-b983-9d75767c38d1" (UID: "cec12aaf-a087-48db-b983-9d75767c38d1"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.912707 4693 scope.go:117] "RemoveContainer" containerID="d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925072 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925104 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925119 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925129 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjvqt\" (UniqueName: \"kubernetes.io/projected/cec12aaf-a087-48db-b983-9d75767c38d1-kube-api-access-rjvqt\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925141 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.925149 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/cec12aaf-a087-48db-b983-9d75767c38d1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.928691 4693 scope.go:117] "RemoveContainer" containerID="a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc" Nov 22 09:20:16 crc kubenswrapper[4693]: E1122 09:20:16.929165 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc\": container with ID starting with a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc not found: ID does not exist" containerID="a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.929219 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc"} err="failed to get container status \"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc\": rpc error: code = NotFound desc = could not find container \"a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc\": container with ID starting with a7b41b28c3619db595da57c82d8d9b56ceebf8642331b7190ad5b34a98be53bc not found: ID does not exist" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.929262 4693 scope.go:117] "RemoveContainer" containerID="d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3" Nov 22 09:20:16 crc kubenswrapper[4693]: E1122 09:20:16.930178 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3\": container with ID starting with 
d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3 not found: ID does not exist" containerID="d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3" Nov 22 09:20:16 crc kubenswrapper[4693]: I1122 09:20:16.930213 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3"} err="failed to get container status \"d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3\": rpc error: code = NotFound desc = could not find container \"d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3\": container with ID starting with d614f8677d4d060654130aa0a90ba3013b09bdb121ee7fca4d23c3ef00eb72c3 not found: ID does not exist" Nov 22 09:20:17 crc kubenswrapper[4693]: I1122 09:20:17.213951 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:20:17 crc kubenswrapper[4693]: I1122 09:20:17.245923 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-64dbf5859c-znqsl"] Nov 22 09:20:18 crc kubenswrapper[4693]: I1122 09:20:18.159611 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" path="/var/lib/kubelet/pods/cec12aaf-a087-48db-b983-9d75767c38d1/volumes" Nov 22 09:20:21 crc kubenswrapper[4693]: I1122 09:20:21.477850 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:21 crc kubenswrapper[4693]: I1122 09:20:21.494604 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:21 crc kubenswrapper[4693]: I1122 09:20:21.933768 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.057926 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-mgpc5"] Nov 22 09:20:22 crc kubenswrapper[4693]: E1122 09:20:22.058237 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="dnsmasq-dns" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.058253 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="dnsmasq-dns" Nov 22 09:20:22 crc kubenswrapper[4693]: E1122 09:20:22.058281 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="init" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.058286 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="init" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.058425 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="cec12aaf-a087-48db-b983-9d75767c38d1" containerName="dnsmasq-dns" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.058955 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.060907 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.064506 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.078711 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgpc5"] Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.119024 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.119439 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.119485 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.119698 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r4vzt\" (UniqueName: \"kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.221724 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r4vzt\" (UniqueName: \"kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.221812 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.221910 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.221979 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.226981 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.227446 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.228132 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.236061 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r4vzt\" (UniqueName: \"kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt\") pod \"nova-cell1-cell-mapping-mgpc5\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.389870 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.764581 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgpc5"] Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.929155 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgpc5" event={"ID":"215c5e95-2af2-4e5f-8ff4-1587744ac34d","Type":"ContainerStarted","Data":"1b3f757c798e5c588e3a3b320fd7c11ee0a5bf14d282349c92ee5510aefe6b22"} Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.929623 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgpc5" event={"ID":"215c5e95-2af2-4e5f-8ff4-1587744ac34d","Type":"ContainerStarted","Data":"9eb2b6f9e377d226418364be1be9358feeda9f103d3468262f95733e26a7cca5"} Nov 22 09:20:22 crc kubenswrapper[4693]: I1122 09:20:22.947649 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-mgpc5" podStartSLOduration=0.947629703 podStartE2EDuration="947.629703ms" podCreationTimestamp="2025-11-22 09:20:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:22.940960548 +0000 UTC m=+1019.083462840" watchObservedRunningTime="2025-11-22 09:20:22.947629703 +0000 UTC m=+1019.090131994" Nov 22 09:20:23 crc kubenswrapper[4693]: I1122 09:20:23.207530 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:20:23 crc kubenswrapper[4693]: I1122 09:20:23.207774 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:20:24 crc kubenswrapper[4693]: I1122 09:20:24.217956 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:24 crc kubenswrapper[4693]: I1122 09:20:24.217978 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.200:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:26 crc kubenswrapper[4693]: I1122 09:20:26.966723 4693 generic.go:334] "Generic (PLEG): container finished" podID="215c5e95-2af2-4e5f-8ff4-1587744ac34d" containerID="1b3f757c798e5c588e3a3b320fd7c11ee0a5bf14d282349c92ee5510aefe6b22" exitCode=0 Nov 22 09:20:26 crc kubenswrapper[4693]: I1122 09:20:26.966780 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgpc5" event={"ID":"215c5e95-2af2-4e5f-8ff4-1587744ac34d","Type":"ContainerDied","Data":"1b3f757c798e5c588e3a3b320fd7c11ee0a5bf14d282349c92ee5510aefe6b22"} Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.233444 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.334107 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") pod \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.334320 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r4vzt\" (UniqueName: \"kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt\") pod \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.334343 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle\") pod \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.334380 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts\") pod \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.338931 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts" (OuterVolumeSpecName: "scripts") pod "215c5e95-2af2-4e5f-8ff4-1587744ac34d" (UID: "215c5e95-2af2-4e5f-8ff4-1587744ac34d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.338944 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt" (OuterVolumeSpecName: "kube-api-access-r4vzt") pod "215c5e95-2af2-4e5f-8ff4-1587744ac34d" (UID: "215c5e95-2af2-4e5f-8ff4-1587744ac34d"). InnerVolumeSpecName "kube-api-access-r4vzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:28 crc kubenswrapper[4693]: E1122 09:20:28.353624 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data podName:215c5e95-2af2-4e5f-8ff4-1587744ac34d nodeName:}" failed. No retries permitted until 2025-11-22 09:20:28.853603843 +0000 UTC m=+1024.996106134 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config-data" (UniqueName: "kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data") pod "215c5e95-2af2-4e5f-8ff4-1587744ac34d" (UID: "215c5e95-2af2-4e5f-8ff4-1587744ac34d") : error deleting /var/lib/kubelet/pods/215c5e95-2af2-4e5f-8ff4-1587744ac34d/volume-subpaths: remove /var/lib/kubelet/pods/215c5e95-2af2-4e5f-8ff4-1587744ac34d/volume-subpaths: no such file or directory Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.355406 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "215c5e95-2af2-4e5f-8ff4-1587744ac34d" (UID: "215c5e95-2af2-4e5f-8ff4-1587744ac34d"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.435730 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.435754 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r4vzt\" (UniqueName: \"kubernetes.io/projected/215c5e95-2af2-4e5f-8ff4-1587744ac34d-kube-api-access-r4vzt\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.435764 4693 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-scripts\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.941521 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") pod \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\" (UID: \"215c5e95-2af2-4e5f-8ff4-1587744ac34d\") " Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.944417 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data" (OuterVolumeSpecName: "config-data") pod "215c5e95-2af2-4e5f-8ff4-1587744ac34d" (UID: "215c5e95-2af2-4e5f-8ff4-1587744ac34d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.991473 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgpc5" event={"ID":"215c5e95-2af2-4e5f-8ff4-1587744ac34d","Type":"ContainerDied","Data":"9eb2b6f9e377d226418364be1be9358feeda9f103d3468262f95733e26a7cca5"} Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.991513 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9eb2b6f9e377d226418364be1be9358feeda9f103d3468262f95733e26a7cca5" Nov 22 09:20:28 crc kubenswrapper[4693]: I1122 09:20:28.991556 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgpc5" Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.043032 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/215c5e95-2af2-4e5f-8ff4-1587744ac34d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.154510 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.154697 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-log" containerID="cri-o://3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9" gracePeriod=30 Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.154758 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-api" containerID="cri-o://69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac" gracePeriod=30 Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.161578 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.161773 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="a508ae94-3587-467b-91b7-8bbf728c1d4d" containerName="nova-scheduler-scheduler" containerID="cri-o://3d04f1b0b2cf3fca708add37a127ec4e3801b1841f8351f9da79a58158dcc5d2" gracePeriod=30 Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.177253 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.177468 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-log" containerID="cri-o://899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5" gracePeriod=30 Nov 22 09:20:29 crc kubenswrapper[4693]: I1122 09:20:29.177538 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-metadata" containerID="cri-o://78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440" gracePeriod=30 Nov 22 09:20:30 crc kubenswrapper[4693]: I1122 09:20:30.001084 4693 generic.go:334] "Generic (PLEG): container finished" podID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerID="899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5" exitCode=143 Nov 22 09:20:30 crc kubenswrapper[4693]: I1122 09:20:30.001192 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerDied","Data":"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5"} Nov 22 09:20:30 crc kubenswrapper[4693]: I1122 09:20:30.003797 4693 generic.go:334] "Generic (PLEG): container finished" podID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerID="3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9" exitCode=143 Nov 22 09:20:30 crc kubenswrapper[4693]: I1122 09:20:30.003838 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" 
event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerDied","Data":"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9"} Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.013186 4693 generic.go:334] "Generic (PLEG): container finished" podID="a508ae94-3587-467b-91b7-8bbf728c1d4d" containerID="3d04f1b0b2cf3fca708add37a127ec4e3801b1841f8351f9da79a58158dcc5d2" exitCode=0 Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.013343 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a508ae94-3587-467b-91b7-8bbf728c1d4d","Type":"ContainerDied","Data":"3d04f1b0b2cf3fca708add37a127ec4e3801b1841f8351f9da79a58158dcc5d2"} Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.241723 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.380699 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqjcm\" (UniqueName: \"kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm\") pod \"a508ae94-3587-467b-91b7-8bbf728c1d4d\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.380815 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle\") pod \"a508ae94-3587-467b-91b7-8bbf728c1d4d\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.380860 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data\") pod \"a508ae94-3587-467b-91b7-8bbf728c1d4d\" (UID: \"a508ae94-3587-467b-91b7-8bbf728c1d4d\") " Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.386269 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm" (OuterVolumeSpecName: "kube-api-access-zqjcm") pod "a508ae94-3587-467b-91b7-8bbf728c1d4d" (UID: "a508ae94-3587-467b-91b7-8bbf728c1d4d"). InnerVolumeSpecName "kube-api-access-zqjcm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.403198 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a508ae94-3587-467b-91b7-8bbf728c1d4d" (UID: "a508ae94-3587-467b-91b7-8bbf728c1d4d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.403234 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data" (OuterVolumeSpecName: "config-data") pod "a508ae94-3587-467b-91b7-8bbf728c1d4d" (UID: "a508ae94-3587-467b-91b7-8bbf728c1d4d"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.483270 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.483313 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a508ae94-3587-467b-91b7-8bbf728c1d4d-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:31 crc kubenswrapper[4693]: I1122 09:20:31.483324 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqjcm\" (UniqueName: \"kubernetes.io/projected/a508ae94-3587-467b-91b7-8bbf728c1d4d-kube-api-access-zqjcm\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.020281 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"a508ae94-3587-467b-91b7-8bbf728c1d4d","Type":"ContainerDied","Data":"37a5d975819696e1d3bb69ef8a57df049de1c288de16e87a413ab533326361c8"} Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.020323 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.020580 4693 scope.go:117] "RemoveContainer" containerID="3d04f1b0b2cf3fca708add37a127ec4e3801b1841f8351f9da79a58158dcc5d2" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.043141 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.048609 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.060609 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:32 crc kubenswrapper[4693]: E1122 09:20:32.060960 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a508ae94-3587-467b-91b7-8bbf728c1d4d" containerName="nova-scheduler-scheduler" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.060978 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a508ae94-3587-467b-91b7-8bbf728c1d4d" containerName="nova-scheduler-scheduler" Nov 22 09:20:32 crc kubenswrapper[4693]: E1122 09:20:32.061020 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="215c5e95-2af2-4e5f-8ff4-1587744ac34d" containerName="nova-manage" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.061026 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="215c5e95-2af2-4e5f-8ff4-1587744ac34d" containerName="nova-manage" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.061199 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a508ae94-3587-467b-91b7-8bbf728c1d4d" containerName="nova-scheduler-scheduler" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.061219 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="215c5e95-2af2-4e5f-8ff4-1587744ac34d" containerName="nova-manage" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.061811 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.065255 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.078862 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.091364 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.091512 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhsmt\" (UniqueName: \"kubernetes.io/projected/d0427665-286b-4058-bec9-917d31c200e3-kube-api-access-hhsmt\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.091634 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-config-data\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.154957 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a508ae94-3587-467b-91b7-8bbf728c1d4d" path="/var/lib/kubelet/pods/a508ae94-3587-467b-91b7-8bbf728c1d4d/volumes" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.193578 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.193649 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhsmt\" (UniqueName: \"kubernetes.io/projected/d0427665-286b-4058-bec9-917d31c200e3-kube-api-access-hhsmt\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.193732 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-config-data\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.198762 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.198763 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0427665-286b-4058-bec9-917d31c200e3-config-data\") pod \"nova-scheduler-0\" (UID: 
\"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.207571 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhsmt\" (UniqueName: \"kubernetes.io/projected/d0427665-286b-4058-bec9-917d31c200e3-kube-api-access-hhsmt\") pod \"nova-scheduler-0\" (UID: \"d0427665-286b-4058-bec9-917d31c200e3\") " pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.380556 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.669396 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.712325 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814454 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814523 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle\") pod \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814604 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814670 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs\") pod \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814695 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cxcxd\" (UniqueName: \"kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814713 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs\") pod \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814789 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data\") pod \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814882 4693 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814909 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814944 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs\") pod \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\" (UID: \"7244d9a2-04a4-45b6-820b-c66ef9928c1f\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.814982 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zhn4q\" (UniqueName: \"kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q\") pod \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\" (UID: \"c3a0f349-b7f9-49cf-8bf7-f60253669f74\") " Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.815600 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs" (OuterVolumeSpecName: "logs") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.815613 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs" (OuterVolumeSpecName: "logs") pod "c3a0f349-b7f9-49cf-8bf7-f60253669f74" (UID: "c3a0f349-b7f9-49cf-8bf7-f60253669f74"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.820662 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd" (OuterVolumeSpecName: "kube-api-access-cxcxd") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "kube-api-access-cxcxd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.821299 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q" (OuterVolumeSpecName: "kube-api-access-zhn4q") pod "c3a0f349-b7f9-49cf-8bf7-f60253669f74" (UID: "c3a0f349-b7f9-49cf-8bf7-f60253669f74"). InnerVolumeSpecName "kube-api-access-zhn4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.840172 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.843178 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3a0f349-b7f9-49cf-8bf7-f60253669f74" (UID: "c3a0f349-b7f9-49cf-8bf7-f60253669f74"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.844981 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data" (OuterVolumeSpecName: "config-data") pod "c3a0f349-b7f9-49cf-8bf7-f60253669f74" (UID: "c3a0f349-b7f9-49cf-8bf7-f60253669f74"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.846678 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data" (OuterVolumeSpecName: "config-data") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.861426 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.866689 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "7244d9a2-04a4-45b6-820b-c66ef9928c1f" (UID: "7244d9a2-04a4-45b6-820b-c66ef9928c1f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.873540 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.878531 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "c3a0f349-b7f9-49cf-8bf7-f60253669f74" (UID: "c3a0f349-b7f9-49cf-8bf7-f60253669f74"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917504 4693 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917539 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917550 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7244d9a2-04a4-45b6-820b-c66ef9928c1f-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917560 4693 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917571 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cxcxd\" (UniqueName: \"kubernetes.io/projected/7244d9a2-04a4-45b6-820b-c66ef9928c1f-kube-api-access-cxcxd\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917580 4693 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3a0f349-b7f9-49cf-8bf7-f60253669f74-logs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917587 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3a0f349-b7f9-49cf-8bf7-f60253669f74-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917596 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917604 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917612 4693 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7244d9a2-04a4-45b6-820b-c66ef9928c1f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:32 crc kubenswrapper[4693]: I1122 09:20:32.917620 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zhn4q\" (UniqueName: \"kubernetes.io/projected/c3a0f349-b7f9-49cf-8bf7-f60253669f74-kube-api-access-zhn4q\") on node \"crc\" DevicePath \"\"" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.029457 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"d0427665-286b-4058-bec9-917d31c200e3","Type":"ContainerStarted","Data":"60f1dbde60bfbb4255acd676edb3b4200fa09ff4364722fb6e9afe1c982aa71d"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.029513 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" 
event={"ID":"d0427665-286b-4058-bec9-917d31c200e3","Type":"ContainerStarted","Data":"e2bf75f708ed92cc146ebfacbbb6fbc2132fe2b589b87ddf53b563767e6fcfd7"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.033136 4693 generic.go:334] "Generic (PLEG): container finished" podID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerID="69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac" exitCode=0 Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.033194 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerDied","Data":"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.033216 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"7244d9a2-04a4-45b6-820b-c66ef9928c1f","Type":"ContainerDied","Data":"ee935354ecbed267204998dce016915f55d180e3a6fabb1b22aa79b4fd58ea02"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.033234 4693 scope.go:117] "RemoveContainer" containerID="69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.033384 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.036412 4693 generic.go:334] "Generic (PLEG): container finished" podID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerID="78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440" exitCode=0 Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.036451 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerDied","Data":"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.036477 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c3a0f349-b7f9-49cf-8bf7-f60253669f74","Type":"ContainerDied","Data":"cc9049757cbdb0fa5e9f98786dd9868956a2636302b6b9a76d91092e51c110c6"} Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.036530 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.050562 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=1.050546003 podStartE2EDuration="1.050546003s" podCreationTimestamp="2025-11-22 09:20:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:33.038980059 +0000 UTC m=+1029.181482350" watchObservedRunningTime="2025-11-22 09:20:33.050546003 +0000 UTC m=+1029.193048314" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.065034 4693 scope.go:117] "RemoveContainer" containerID="3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.086487 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.097733 4693 scope.go:117] "RemoveContainer" containerID="69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.098294 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac\": container with ID starting with 69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac not found: ID does not exist" containerID="69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.098364 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac"} err="failed to get container status \"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac\": rpc error: code = NotFound desc = could not find container \"69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac\": container with ID starting with 69325b1a699acfaadc575d241ebfe8c4b3b7dc0ad0d0c791291bcec45acb29ac not found: ID does not exist" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.098404 4693 scope.go:117] "RemoveContainer" containerID="3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.099083 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9\": container with ID starting with 3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9 not found: ID does not exist" containerID="3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.099107 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9"} err="failed to get container status \"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9\": rpc error: code = NotFound desc = could not find container \"3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9\": container with ID starting with 3cc5178db7d2fcd4d86399d649d0d6d23cb994a7a4bb088f474c8f19773f99c9 not found: ID does not exist" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.099123 4693 scope.go:117] "RemoveContainer" 
containerID="78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.103138 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.113523 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.117772 4693 scope.go:117] "RemoveContainer" containerID="899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.120057 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126230 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.126688 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-metadata" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126706 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-metadata" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.126721 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-api" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126728 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-api" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.126748 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-log" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126754 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-log" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.126774 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-log" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126779 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-log" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126957 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-api" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126971 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-log" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.126989 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" containerName="nova-metadata-metadata" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.127000 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" containerName="nova-api-log" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.128124 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.130464 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.130697 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.130817 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.131901 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.132451 4693 scope.go:117] "RemoveContainer" containerID="78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.132730 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440\": container with ID starting with 78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440 not found: ID does not exist" containerID="78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.132760 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440"} err="failed to get container status \"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440\": rpc error: code = NotFound desc = could not find container \"78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440\": container with ID starting with 78d712263c5c1506dea424c1c47541b2798f02527b1a6a2d2eefee453a60b440 not found: ID does not exist" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.132780 4693 scope.go:117] "RemoveContainer" containerID="899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5" Nov 22 09:20:33 crc kubenswrapper[4693]: E1122 09:20:33.133055 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5\": container with ID starting with 899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5 not found: ID does not exist" containerID="899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.133078 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5"} err="failed to get container status \"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5\": rpc error: code = NotFound desc = could not find container \"899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5\": container with ID starting with 899b29082046fba905152fd0bd27f509f27c9d0f49a9b4751385377eb6bab5d5 not found: ID does not exist" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.137020 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.140938 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.142821 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.143136 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.143768 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224566 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-public-tls-certs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224624 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224647 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxdzs\" (UniqueName: \"kubernetes.io/projected/1569b6f7-3def-4eb6-87e7-5705b74b1fed-kube-api-access-xxdzs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224674 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6d59q\" (UniqueName: \"kubernetes.io/projected/5079844b-9574-4583-97b2-8232271e5681-kube-api-access-6d59q\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224742 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1569b6f7-3def-4eb6-87e7-5705b74b1fed-logs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224784 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.224809 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-config-data\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.225000 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " 
pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.225122 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-config-data\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.225200 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5079844b-9574-4583-97b2-8232271e5681-logs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.225255 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.327583 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.327905 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-config-data\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.327965 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328004 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-config-data\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328032 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5079844b-9574-4583-97b2-8232271e5681-logs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328084 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328123 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-public-tls-certs\") pod 
\"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328154 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328172 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxdzs\" (UniqueName: \"kubernetes.io/projected/1569b6f7-3def-4eb6-87e7-5705b74b1fed-kube-api-access-xxdzs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328194 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6d59q\" (UniqueName: \"kubernetes.io/projected/5079844b-9574-4583-97b2-8232271e5681-kube-api-access-6d59q\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.328878 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/5079844b-9574-4583-97b2-8232271e5681-logs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.329266 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1569b6f7-3def-4eb6-87e7-5705b74b1fed-logs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.329570 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1569b6f7-3def-4eb6-87e7-5705b74b1fed-logs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.331386 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-config-data\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.331566 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-internal-tls-certs\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.331958 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.333092 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-public-tls-certs\") pod \"nova-api-0\" (UID: 
\"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.333364 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5079844b-9574-4583-97b2-8232271e5681-config-data\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.334163 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.343087 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1569b6f7-3def-4eb6-87e7-5705b74b1fed-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.343626 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxdzs\" (UniqueName: \"kubernetes.io/projected/1569b6f7-3def-4eb6-87e7-5705b74b1fed-kube-api-access-xxdzs\") pod \"nova-metadata-0\" (UID: \"1569b6f7-3def-4eb6-87e7-5705b74b1fed\") " pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.343628 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6d59q\" (UniqueName: \"kubernetes.io/projected/5079844b-9574-4583-97b2-8232271e5681-kube-api-access-6d59q\") pod \"nova-api-0\" (UID: \"5079844b-9574-4583-97b2-8232271e5681\") " pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.443491 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.454685 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.826480 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: I1122 09:20:33.891210 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Nov 22 09:20:33 crc kubenswrapper[4693]: W1122 09:20:33.897826 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1569b6f7_3def_4eb6_87e7_5705b74b1fed.slice/crio-bfa7a74f669a3baaaef4beb8cd0498dfb72d769854413e7bea76e1425a3ca56e WatchSource:0}: Error finding container bfa7a74f669a3baaaef4beb8cd0498dfb72d769854413e7bea76e1425a3ca56e: Status 404 returned error can't find the container with id bfa7a74f669a3baaaef4beb8cd0498dfb72d769854413e7bea76e1425a3ca56e Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.059433 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1569b6f7-3def-4eb6-87e7-5705b74b1fed","Type":"ContainerStarted","Data":"56e0afe3f02e589d1140d6bc59b0b784954e8e10631492da45641fd11fb77b2e"} Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.059478 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1569b6f7-3def-4eb6-87e7-5705b74b1fed","Type":"ContainerStarted","Data":"bfa7a74f669a3baaaef4beb8cd0498dfb72d769854413e7bea76e1425a3ca56e"} Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.061028 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5079844b-9574-4583-97b2-8232271e5681","Type":"ContainerStarted","Data":"0616692461aee35fda962e64bcad32735a882b5476785c440dd61ed5f0056340"} Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.061326 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5079844b-9574-4583-97b2-8232271e5681","Type":"ContainerStarted","Data":"6b9effaf09af9fff7a8dbf56b3c72e3fcb7cac784dfac9d29654164b4fa919bf"} Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.156225 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7244d9a2-04a4-45b6-820b-c66ef9928c1f" path="/var/lib/kubelet/pods/7244d9a2-04a4-45b6-820b-c66ef9928c1f/volumes" Nov 22 09:20:34 crc kubenswrapper[4693]: I1122 09:20:34.156880 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3a0f349-b7f9-49cf-8bf7-f60253669f74" path="/var/lib/kubelet/pods/c3a0f349-b7f9-49cf-8bf7-f60253669f74/volumes" Nov 22 09:20:35 crc kubenswrapper[4693]: I1122 09:20:35.077270 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1569b6f7-3def-4eb6-87e7-5705b74b1fed","Type":"ContainerStarted","Data":"63e268b75011fb1a777e3916d3ce571f24a3bfa2099a63452ffe30706d076ebb"} Nov 22 09:20:35 crc kubenswrapper[4693]: I1122 09:20:35.082664 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"5079844b-9574-4583-97b2-8232271e5681","Type":"ContainerStarted","Data":"90efb0b60b71d95524fb5c0700cfe53620d197ea58e72af0fdf04883013f52c3"} Nov 22 09:20:35 crc kubenswrapper[4693]: I1122 09:20:35.098707 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.098695668 podStartE2EDuration="2.098695668s" podCreationTimestamp="2025-11-22 09:20:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:35.091828412 +0000 UTC m=+1031.234330703" watchObservedRunningTime="2025-11-22 09:20:35.098695668 +0000 UTC m=+1031.241197959" Nov 22 09:20:35 crc kubenswrapper[4693]: I1122 09:20:35.109705 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.109692381 podStartE2EDuration="2.109692381s" podCreationTimestamp="2025-11-22 09:20:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:20:35.105419895 +0000 UTC m=+1031.247922187" watchObservedRunningTime="2025-11-22 09:20:35.109692381 +0000 UTC m=+1031.252194672" Nov 22 09:20:37 crc kubenswrapper[4693]: I1122 09:20:37.380696 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Nov 22 09:20:38 crc kubenswrapper[4693]: I1122 09:20:38.455565 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 09:20:38 crc kubenswrapper[4693]: I1122 09:20:38.455658 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Nov 22 09:20:40 crc kubenswrapper[4693]: I1122 09:20:40.160145 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Nov 22 09:20:42 crc kubenswrapper[4693]: I1122 09:20:42.381806 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Nov 22 09:20:42 crc kubenswrapper[4693]: I1122 09:20:42.408121 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Nov 22 09:20:43 crc kubenswrapper[4693]: I1122 09:20:43.151547 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Nov 22 09:20:43 crc kubenswrapper[4693]: I1122 09:20:43.444549 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:20:43 crc kubenswrapper[4693]: I1122 09:20:43.444746 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Nov 22 09:20:43 crc kubenswrapper[4693]: I1122 09:20:43.455148 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 09:20:43 crc kubenswrapper[4693]: I1122 09:20:43.455176 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Nov 22 09:20:44 crc kubenswrapper[4693]: I1122 09:20:44.476959 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5079844b-9574-4583-97b2-8232271e5681" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:44 crc kubenswrapper[4693]: I1122 09:20:44.476997 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="5079844b-9574-4583-97b2-8232271e5681" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.203:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:44 crc kubenswrapper[4693]: I1122 09:20:44.476968 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1569b6f7-3def-4eb6-87e7-5705b74b1fed" 
containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:44 crc kubenswrapper[4693]: I1122 09:20:44.477047 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1569b6f7-3def-4eb6-87e7-5705b74b1fed" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.204:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.449213 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.449592 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.450166 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.450193 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.453732 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.454602 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.459680 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.460044 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Nov 22 09:20:53 crc kubenswrapper[4693]: I1122 09:20:53.466643 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 09:20:54 crc kubenswrapper[4693]: I1122 09:20:54.206476 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Nov 22 09:21:00 crc kubenswrapper[4693]: I1122 09:21:00.246500 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:21:00 crc kubenswrapper[4693]: I1122 09:21:00.246893 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:21:00 crc kubenswrapper[4693]: I1122 09:21:00.417377 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:21:01 crc kubenswrapper[4693]: I1122 09:21:01.017552 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:03 crc kubenswrapper[4693]: I1122 09:21:03.479631 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="rabbitmq" 
containerID="cri-o://41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d" gracePeriod=604797 Nov 22 09:21:03 crc kubenswrapper[4693]: I1122 09:21:03.976136 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="rabbitmq" containerID="cri-o://ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd" gracePeriod=604798 Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.843387 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998292 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998422 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998468 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998508 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dthks\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998534 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998560 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998579 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998623 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998647 4693 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998664 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.998701 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info\") pod \"99df5d88-540a-495c-a688-43f4d63ffa45\" (UID: \"99df5d88-540a-495c-a688-43f4d63ffa45\") " Nov 22 09:21:09 crc kubenswrapper[4693]: I1122 09:21:09.999391 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.002128 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.002520 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.003563 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.006079 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.006794 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). 
InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.007884 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks" (OuterVolumeSpecName: "kube-api-access-dthks") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "kube-api-access-dthks". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.008921 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info" (OuterVolumeSpecName: "pod-info") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.020100 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data" (OuterVolumeSpecName: "config-data") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.051627 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf" (OuterVolumeSpecName: "server-conf") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.074018 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "99df5d88-540a-495c-a688-43f4d63ffa45" (UID: "99df5d88-540a-495c-a688-43f4d63ffa45"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.100961 4693 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101006 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101017 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dthks\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-kube-api-access-dthks\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101027 4693 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101035 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101043 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101050 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101057 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/99df5d88-540a-495c-a688-43f4d63ffa45-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101064 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/99df5d88-540a-495c-a688-43f4d63ffa45-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101071 4693 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/99df5d88-540a-495c-a688-43f4d63ffa45-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.101079 4693 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/99df5d88-540a-495c-a688-43f4d63ffa45-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.118007 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.202670 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.300905 4693 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.330752 4693 generic.go:334] "Generic (PLEG): container finished" podID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerID="ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd" exitCode=0 Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.330805 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerDied","Data":"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd"} Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.330828 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"c687d537-2713-42cf-9f20-ef640bbd6c3c","Type":"ContainerDied","Data":"a447cb3116dc44bba0590fe10860acd5e9ef50a863af88e970731f931244d8f1"} Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.330859 4693 scope.go:117] "RemoveContainer" containerID="ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.330960 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.335809 4693 generic.go:334] "Generic (PLEG): container finished" podID="99df5d88-540a-495c-a688-43f4d63ffa45" containerID="41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d" exitCode=0 Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.335836 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerDied","Data":"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d"} Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.335870 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"99df5d88-540a-495c-a688-43f4d63ffa45","Type":"ContainerDied","Data":"2db6bd12d446cd26dc981f1920e57e3d5c5c972f118793e0f0c4b2d55d3482e3"} Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.335879 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.354746 4693 scope.go:117] "RemoveContainer" containerID="6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.359629 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.365485 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379270 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.379582 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="setup-container" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379597 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="setup-container" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.379617 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379623 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.379644 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379649 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.379659 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="setup-container" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379665 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="setup-container" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379806 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.379831 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" containerName="rabbitmq" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.380896 4693 scope.go:117] "RemoveContainer" containerID="ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.380910 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.383172 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd\": container with ID starting with ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd not found: ID does not exist" containerID="ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.383204 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd"} err="failed to get container status \"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd\": rpc error: code = NotFound desc = could not find container \"ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd\": container with ID starting with ca15138eb81c368ee756e0a8bdc7669cc5491259ecb494331c30c5f4fef7bebd not found: ID does not exist" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.383241 4693 scope.go:117] "RemoveContainer" containerID="6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.385293 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.385721 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-2qhj8" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.385899 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.386153 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.386301 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.386440 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.386570 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.387289 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef\": container with ID starting with 6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef not found: ID does not exist" containerID="6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.387344 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef"} err="failed to get container status \"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef\": rpc error: code = NotFound desc = could not find container \"6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef\": container with ID starting with 6b91a4a1e49606043a653457e28dc247f03a894350e3182e2c532d7e629f00ef not found: ID does not exist" Nov 22 09:21:10 crc 
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.387360 4693 scope.go:117] "RemoveContainer" containerID="41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d"
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.402992 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408195 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w6fxg\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408259 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408287 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408318 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408393 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408410 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408440 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408468 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408516 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") "
Nov 22 09:21:10 crc kubenswrapper[4693]: I1122
09:21:10.408575 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.408590 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info\") pod \"c687d537-2713-42cf-9f20-ef640bbd6c3c\" (UID: \"c687d537-2713-42cf-9f20-ef640bbd6c3c\") " Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.410184 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.412544 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info" (OuterVolumeSpecName: "pod-info") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.412937 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.413380 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.413671 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.414750 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg" (OuterVolumeSpecName: "kube-api-access-w6fxg") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "kube-api-access-w6fxg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.418326 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.418435 4693 scope.go:117] "RemoveContainer" containerID="b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.419000 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "persistence") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.433171 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data" (OuterVolumeSpecName: "config-data") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.438106 4693 scope.go:117] "RemoveContainer" containerID="41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.438420 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d\": container with ID starting with 41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d not found: ID does not exist" containerID="41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.438448 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d"} err="failed to get container status \"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d\": rpc error: code = NotFound desc = could not find container \"41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d\": container with ID starting with 41c819975248bc179fd9d3c5bbcd728c56acdce401d9fc05cbeddaedef31155d not found: ID does not exist" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.438466 4693 scope.go:117] "RemoveContainer" containerID="b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4" Nov 22 09:21:10 crc kubenswrapper[4693]: E1122 09:21:10.438624 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4\": container with ID starting with b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4 not found: ID does not exist" containerID="b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.438645 4693 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4"} err="failed to get container status \"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4\": rpc error: code = NotFound desc = could not find container \"b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4\": container with ID starting with b4a54e2c9c9b3590d807c5ec5cb1d80715aee4da103ed89a00485a765fcbe7b4 not found: ID does not exist" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.451038 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf" (OuterVolumeSpecName: "server-conf") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.490009 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "c687d537-2713-42cf-9f20-ef640bbd6c3c" (UID: "c687d537-2713-42cf-9f20-ef640bbd6c3c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.510796 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.510830 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/95a8e4f8-d504-40f1-8137-34a70c82e9cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.510872 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/95a8e4f8-d504-40f1-8137-34a70c82e9cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.510970 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511067 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511247 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: 
\"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511347 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511452 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511509 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldcj8\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-kube-api-access-ldcj8\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511542 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-config-data\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511566 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511650 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511666 4693 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/c687d537-2713-42cf-9f20-ef640bbd6c3c-pod-info\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511675 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w6fxg\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-kube-api-access-w6fxg\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511686 4693 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-plugins-conf\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511693 4693 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-server-conf\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511716 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511726 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511733 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511741 4693 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/c687d537-2713-42cf-9f20-ef640bbd6c3c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511749 4693 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/c687d537-2713-42cf-9f20-ef640bbd6c3c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.511757 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/c687d537-2713-42cf-9f20-ef640bbd6c3c-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.526095 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626071 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626144 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626223 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626272 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldcj8\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-kube-api-access-ldcj8\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626302 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-config-data\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 
09:21:10.626326 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626410 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626428 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/95a8e4f8-d504-40f1-8137-34a70c82e9cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626455 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/95a8e4f8-d504-40f1-8137-34a70c82e9cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626484 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626543 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.626579 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.627229 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.627273 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.627449 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.627797 4693 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-config-data\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.627890 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.630686 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.631729 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.633412 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/95a8e4f8-d504-40f1-8137-34a70c82e9cb-server-conf\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.635791 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/95a8e4f8-d504-40f1-8137-34a70c82e9cb-pod-info\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.636178 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/95a8e4f8-d504-40f1-8137-34a70c82e9cb-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.642491 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldcj8\" (UniqueName: \"kubernetes.io/projected/95a8e4f8-d504-40f1-8137-34a70c82e9cb-kube-api-access-ldcj8\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.663676 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-server-0\" (UID: \"95a8e4f8-d504-40f1-8137-34a70c82e9cb\") " pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.710870 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.732894 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.748891 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.768480 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.774282 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.779541 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.779754 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.779941 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.780173 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.780304 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.780487 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-hqxpl" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.780623 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.792995 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935196 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935439 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935463 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c7kvl\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-kube-api-access-c7kvl\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935507 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0753e7f-679e-4da7-a765-d1d220684511-pod-info\") 
pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935526 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935542 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935567 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935620 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935633 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0753e7f-679e-4da7-a765-d1d220684511-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935669 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:10 crc kubenswrapper[4693]: I1122 09:21:10.935685 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036669 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036703 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-tls\") pod 
\"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036744 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036776 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036798 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c7kvl\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-kube-api-access-c7kvl\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036836 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0753e7f-679e-4da7-a765-d1d220684511-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036867 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036882 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036905 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.036946 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.037206 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0753e7f-679e-4da7-a765-d1d220684511-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 
09:21:11.037271 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.037479 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.038091 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.038119 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.038464 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.038716 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/b0753e7f-679e-4da7-a765-d1d220684511-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.041442 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.041545 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/b0753e7f-679e-4da7-a765-d1d220684511-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.041772 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/b0753e7f-679e-4da7-a765-d1d220684511-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.041812 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: 
\"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.051540 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c7kvl\" (UniqueName: \"kubernetes.io/projected/b0753e7f-679e-4da7-a765-d1d220684511-kube-api-access-c7kvl\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.057188 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"b0753e7f-679e-4da7-a765-d1d220684511\") " pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.114263 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.222249 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.345022 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"95a8e4f8-d504-40f1-8137-34a70c82e9cb","Type":"ContainerStarted","Data":"46cdbd02da93993372c02617125c6e282d98eb76d6cb64839d58e99baffc5fca"} Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.454715 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.456040 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.457568 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.464537 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.512149 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Nov 22 09:21:11 crc kubenswrapper[4693]: W1122 09:21:11.514551 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0753e7f_679e_4da7_a765_d1d220684511.slice/crio-cf76e58be56cd5ea87a7cbf55a426a25f3265644ccb74cfcffd521e243e8f09c WatchSource:0}: Error finding container cf76e58be56cd5ea87a7cbf55a426a25f3265644ccb74cfcffd521e243e8f09c: Status 404 returned error can't find the container with id cf76e58be56cd5ea87a7cbf55a426a25f3265644ccb74cfcffd521e243e8f09c Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546004 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546041 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546100 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546123 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546161 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c95tg\" (UniqueName: \"kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546183 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 
09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.546199 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.647917 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648120 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648183 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648199 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648252 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648276 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648312 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c95tg\" (UniqueName: \"kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.648669 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc 
kubenswrapper[4693]: I1122 09:21:11.649159 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.649238 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.649311 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.649317 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.649415 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.662265 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c95tg\" (UniqueName: \"kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg\") pod \"dnsmasq-dns-7f7944d86c-szk2w\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:11 crc kubenswrapper[4693]: I1122 09:21:11.769278 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.120851 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:12 crc kubenswrapper[4693]: W1122 09:21:12.123955 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod81418e14_1d60_45c9_8897_c8d0d3ba8be6.slice/crio-8d2978e4257c51c8d3a6e17395f6f50fd8bb2009f7e1e9b60debb584ffedea8a WatchSource:0}: Error finding container 8d2978e4257c51c8d3a6e17395f6f50fd8bb2009f7e1e9b60debb584ffedea8a: Status 404 returned error can't find the container with id 8d2978e4257c51c8d3a6e17395f6f50fd8bb2009f7e1e9b60debb584ffedea8a Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.160284 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="99df5d88-540a-495c-a688-43f4d63ffa45" path="/var/lib/kubelet/pods/99df5d88-540a-495c-a688-43f4d63ffa45/volumes" Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.161095 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c687d537-2713-42cf-9f20-ef640bbd6c3c" path="/var/lib/kubelet/pods/c687d537-2713-42cf-9f20-ef640bbd6c3c/volumes" Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.358385 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"95a8e4f8-d504-40f1-8137-34a70c82e9cb","Type":"ContainerStarted","Data":"6dffdbdea005b0783a1c90a1425d5ba4bc60e4652da24ac71f29c85fabbf65d9"} Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.359764 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerStarted","Data":"9f9cfbde1401efa524549cff62cff79285eb89cea40af6c21aa8ac0b14982e12"} Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.359876 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerStarted","Data":"8d2978e4257c51c8d3a6e17395f6f50fd8bb2009f7e1e9b60debb584ffedea8a"} Nov 22 09:21:12 crc kubenswrapper[4693]: I1122 09:21:12.360787 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b0753e7f-679e-4da7-a765-d1d220684511","Type":"ContainerStarted","Data":"cf76e58be56cd5ea87a7cbf55a426a25f3265644ccb74cfcffd521e243e8f09c"} Nov 22 09:21:13 crc kubenswrapper[4693]: I1122 09:21:13.368426 4693 generic.go:334] "Generic (PLEG): container finished" podID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerID="9f9cfbde1401efa524549cff62cff79285eb89cea40af6c21aa8ac0b14982e12" exitCode=0 Nov 22 09:21:13 crc kubenswrapper[4693]: I1122 09:21:13.368468 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerDied","Data":"9f9cfbde1401efa524549cff62cff79285eb89cea40af6c21aa8ac0b14982e12"} Nov 22 09:21:13 crc kubenswrapper[4693]: I1122 09:21:13.369986 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b0753e7f-679e-4da7-a765-d1d220684511","Type":"ContainerStarted","Data":"7e960541282b52773b82f45d532fd1cfcf6d93612fd0e567e2f6b46707b8c098"} Nov 22 09:21:14 crc kubenswrapper[4693]: I1122 09:21:14.378202 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerStarted","Data":"705202de6523474100d038a65f48472273bd0688e58a0560a2a539ceb43d08f4"} Nov 22 09:21:14 crc kubenswrapper[4693]: I1122 09:21:14.393795 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" podStartSLOduration=3.393781203 podStartE2EDuration="3.393781203s" podCreationTimestamp="2025-11-22 09:21:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:21:14.391786572 +0000 UTC m=+1070.534288862" watchObservedRunningTime="2025-11-22 09:21:14.393781203 +0000 UTC m=+1070.536283493" Nov 22 09:21:15 crc kubenswrapper[4693]: I1122 09:21:15.384531 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.769968 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.812510 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.812686 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="dnsmasq-dns" containerID="cri-o://09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d" gracePeriod=10 Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.918049 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5d5cf5b645-s8pnc"] Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.921110 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:21 crc kubenswrapper[4693]: I1122 09:21:21.944555 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d5cf5b645-s8pnc"] Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014086 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-sb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014150 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-nb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014232 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014306 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r2rxx\" (UniqueName: \"kubernetes.io/projected/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-kube-api-access-r2rxx\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014343 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-svc\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014386 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-swift-storage-0\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.014421 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-config\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115578 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-nb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115654 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115679 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r2rxx\" (UniqueName: \"kubernetes.io/projected/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-kube-api-access-r2rxx\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115710 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-svc\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115741 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-swift-storage-0\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115770 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-config\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.115813 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-sb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.116456 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-nb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.116515 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-ovsdbserver-sb\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.117033 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-svc\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.117093 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-dns-swift-storage-0\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.117559 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-openstack-edpm-ipam\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.117641 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-config\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.138959 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r2rxx\" (UniqueName: \"kubernetes.io/projected/ccb21e39-74b5-4a8f-ad6a-8c1dede8e334-kube-api-access-r2rxx\") pod \"dnsmasq-dns-5d5cf5b645-s8pnc\" (UID: \"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334\") " pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.233341 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.269009 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.318005 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.318075 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.318099 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.318118 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2v7jf\" (UniqueName: \"kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.318185 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc 
kubenswrapper[4693]: I1122 09:21:22.318260 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0\") pod \"c01ee7ab-a925-456b-bd0f-124846156df3\" (UID: \"c01ee7ab-a925-456b-bd0f-124846156df3\") " Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.321310 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf" (OuterVolumeSpecName: "kube-api-access-2v7jf") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "kube-api-access-2v7jf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.366773 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.367252 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.367616 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.374403 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.376825 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config" (OuterVolumeSpecName: "config") pod "c01ee7ab-a925-456b-bd0f-124846156df3" (UID: "c01ee7ab-a925-456b-bd0f-124846156df3"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420649 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420675 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420684 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420694 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420701 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2v7jf\" (UniqueName: \"kubernetes.io/projected/c01ee7ab-a925-456b-bd0f-124846156df3-kube-api-access-2v7jf\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.420710 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c01ee7ab-a925-456b-bd0f-124846156df3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.433727 4693 generic.go:334] "Generic (PLEG): container finished" podID="c01ee7ab-a925-456b-bd0f-124846156df3" containerID="09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d" exitCode=0 Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.433762 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" event={"ID":"c01ee7ab-a925-456b-bd0f-124846156df3","Type":"ContainerDied","Data":"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d"} Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.433796 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" event={"ID":"c01ee7ab-a925-456b-bd0f-124846156df3","Type":"ContainerDied","Data":"d6e8b325f8ec890b156b4bf5d6a07c4e0cf083bc6a23a9061b9c132bb4d34ab6"} Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.433812 4693 scope.go:117] "RemoveContainer" containerID="09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.433917 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55bfb77665-4v4px" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.471544 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.475480 4693 scope.go:117] "RemoveContainer" containerID="bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.478101 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55bfb77665-4v4px"] Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.492226 4693 scope.go:117] "RemoveContainer" containerID="09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d" Nov 22 09:21:22 crc kubenswrapper[4693]: E1122 09:21:22.492710 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d\": container with ID starting with 09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d not found: ID does not exist" containerID="09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.492742 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d"} err="failed to get container status \"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d\": rpc error: code = NotFound desc = could not find container \"09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d\": container with ID starting with 09ca5fd4617a1b48817dba87f600a5c346088bd1c854782bba95f5ec8fbca90d not found: ID does not exist" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.492760 4693 scope.go:117] "RemoveContainer" containerID="bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727" Nov 22 09:21:22 crc kubenswrapper[4693]: E1122 09:21:22.493063 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727\": container with ID starting with bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727 not found: ID does not exist" containerID="bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.493080 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727"} err="failed to get container status \"bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727\": rpc error: code = NotFound desc = could not find container \"bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727\": container with ID starting with bcd293dfb8e6519a7e391e233a65a8e215a56d01b1cd33ceb446239424195727 not found: ID does not exist" Nov 22 09:21:22 crc kubenswrapper[4693]: I1122 09:21:22.665882 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5d5cf5b645-s8pnc"] Nov 22 09:21:23 crc kubenswrapper[4693]: I1122 09:21:23.449798 4693 generic.go:334] "Generic (PLEG): container finished" podID="ccb21e39-74b5-4a8f-ad6a-8c1dede8e334" containerID="f53a15fafc82fbdf211fcc764ea6e690242f8d9bf2b4074fcfaa18c993c976ec" exitCode=0 Nov 22 09:21:23 crc kubenswrapper[4693]: I1122 09:21:23.450107 4693 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" event={"ID":"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334","Type":"ContainerDied","Data":"f53a15fafc82fbdf211fcc764ea6e690242f8d9bf2b4074fcfaa18c993c976ec"} Nov 22 09:21:23 crc kubenswrapper[4693]: I1122 09:21:23.450196 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" event={"ID":"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334","Type":"ContainerStarted","Data":"e3d83aab39e7c18df72b738602d489b292234d7eba2b881fc5fddf2b496d8eb3"} Nov 22 09:21:24 crc kubenswrapper[4693]: I1122 09:21:24.155176 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" path="/var/lib/kubelet/pods/c01ee7ab-a925-456b-bd0f-124846156df3/volumes" Nov 22 09:21:24 crc kubenswrapper[4693]: I1122 09:21:24.465164 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" event={"ID":"ccb21e39-74b5-4a8f-ad6a-8c1dede8e334","Type":"ContainerStarted","Data":"e37b040c2c7c69719a787c274efb68f4d4eb4af48cef7ee85622d464affaf288"} Nov 22 09:21:24 crc kubenswrapper[4693]: I1122 09:21:24.465951 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:24 crc kubenswrapper[4693]: I1122 09:21:24.478658 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" podStartSLOduration=3.478643918 podStartE2EDuration="3.478643918s" podCreationTimestamp="2025-11-22 09:21:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:21:24.477184622 +0000 UTC m=+1080.619686913" watchObservedRunningTime="2025-11-22 09:21:24.478643918 +0000 UTC m=+1080.621146209" Nov 22 09:21:30 crc kubenswrapper[4693]: I1122 09:21:30.246754 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:21:30 crc kubenswrapper[4693]: I1122 09:21:30.247143 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.270992 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5d5cf5b645-s8pnc" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.306338 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.306536 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="dnsmasq-dns" containerID="cri-o://705202de6523474100d038a65f48472273bd0688e58a0560a2a539ceb43d08f4" gracePeriod=10 Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.524049 4693 generic.go:334] "Generic (PLEG): container finished" podID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" 
containerID="705202de6523474100d038a65f48472273bd0688e58a0560a2a539ceb43d08f4" exitCode=0 Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.524214 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerDied","Data":"705202de6523474100d038a65f48472273bd0688e58a0560a2a539ceb43d08f4"} Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.676682 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.827150 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.827285 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.828804 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c95tg\" (UniqueName: \"kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.828904 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.828934 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.828974 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.829080 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config\") pod \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\" (UID: \"81418e14-1d60-45c9-8897-c8d0d3ba8be6\") " Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.832820 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg" (OuterVolumeSpecName: "kube-api-access-c95tg") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "kube-api-access-c95tg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.862581 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.863175 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.864284 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.865526 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config" (OuterVolumeSpecName: "config") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.866006 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.871163 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "81418e14-1d60-45c9-8897-c8d0d3ba8be6" (UID: "81418e14-1d60-45c9-8897-c8d0d3ba8be6"). InnerVolumeSpecName "openstack-edpm-ipam". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.930889 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931038 4693 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-svc\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931101 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c95tg\" (UniqueName: \"kubernetes.io/projected/81418e14-1d60-45c9-8897-c8d0d3ba8be6-kube-api-access-c95tg\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931156 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931207 4693 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931257 4693 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:32 crc kubenswrapper[4693]: I1122 09:21:32.931313 4693 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/81418e14-1d60-45c9-8897-c8d0d3ba8be6-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.531837 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" event={"ID":"81418e14-1d60-45c9-8897-c8d0d3ba8be6","Type":"ContainerDied","Data":"8d2978e4257c51c8d3a6e17395f6f50fd8bb2009f7e1e9b60debb584ffedea8a"} Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.531904 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7f7944d86c-szk2w" Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.531916 4693 scope.go:117] "RemoveContainer" containerID="705202de6523474100d038a65f48472273bd0688e58a0560a2a539ceb43d08f4" Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.551035 4693 scope.go:117] "RemoveContainer" containerID="9f9cfbde1401efa524549cff62cff79285eb89cea40af6c21aa8ac0b14982e12" Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.556222 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:33 crc kubenswrapper[4693]: I1122 09:21:33.561288 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f7944d86c-szk2w"] Nov 22 09:21:34 crc kubenswrapper[4693]: I1122 09:21:34.155707 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" path="/var/lib/kubelet/pods/81418e14-1d60-45c9-8897-c8d0d3ba8be6/volumes" Nov 22 09:21:44 crc kubenswrapper[4693]: I1122 09:21:44.604698 4693 generic.go:334] "Generic (PLEG): container finished" podID="95a8e4f8-d504-40f1-8137-34a70c82e9cb" containerID="6dffdbdea005b0783a1c90a1425d5ba4bc60e4652da24ac71f29c85fabbf65d9" exitCode=0 Nov 22 09:21:44 crc kubenswrapper[4693]: I1122 09:21:44.604763 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"95a8e4f8-d504-40f1-8137-34a70c82e9cb","Type":"ContainerDied","Data":"6dffdbdea005b0783a1c90a1425d5ba4bc60e4652da24ac71f29c85fabbf65d9"} Nov 22 09:21:44 crc kubenswrapper[4693]: I1122 09:21:44.612437 4693 generic.go:334] "Generic (PLEG): container finished" podID="b0753e7f-679e-4da7-a765-d1d220684511" containerID="7e960541282b52773b82f45d532fd1cfcf6d93612fd0e567e2f6b46707b8c098" exitCode=0 Nov 22 09:21:44 crc kubenswrapper[4693]: I1122 09:21:44.612466 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b0753e7f-679e-4da7-a765-d1d220684511","Type":"ContainerDied","Data":"7e960541282b52773b82f45d532fd1cfcf6d93612fd0e567e2f6b46707b8c098"} Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.472269 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r"] Nov 22 09:21:45 crc kubenswrapper[4693]: E1122 09:21:45.473013 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473112 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: E1122 09:21:45.473177 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473238 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: E1122 09:21:45.473298 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="init" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473350 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="init" Nov 22 09:21:45 crc kubenswrapper[4693]: E1122 09:21:45.473432 4693 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="init" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473489 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="init" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473727 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c01ee7ab-a925-456b-bd0f-124846156df3" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.473815 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="81418e14-1d60-45c9-8897-c8d0d3ba8be6" containerName="dnsmasq-dns" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.474398 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.475585 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.475663 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.476137 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.476451 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.479995 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r"] Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.620777 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"b0753e7f-679e-4da7-a765-d1d220684511","Type":"ContainerStarted","Data":"64c735d021ae524b1b0c406a6701b26ce535c0ada7bd54ea77aff464498f2e0b"} Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.620999 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.623046 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"95a8e4f8-d504-40f1-8137-34a70c82e9cb","Type":"ContainerStarted","Data":"62fc5d5f701d0458bcc5eb93e03647eee78834af8b62c19072475a2597998893"} Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.623200 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.627618 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.627762 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" 
(UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.627960 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.628061 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4rwb\" (UniqueName: \"kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.640227 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=35.640215082 podStartE2EDuration="35.640215082s" podCreationTimestamp="2025-11-22 09:21:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:21:45.634881611 +0000 UTC m=+1101.777383902" watchObservedRunningTime="2025-11-22 09:21:45.640215082 +0000 UTC m=+1101.782717374" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.653862 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=35.653833657 podStartE2EDuration="35.653833657s" podCreationTimestamp="2025-11-22 09:21:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:21:45.651418724 +0000 UTC m=+1101.793921015" watchObservedRunningTime="2025-11-22 09:21:45.653833657 +0000 UTC m=+1101.796335948" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.729175 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.729247 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.729310 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.729395 4693 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4rwb\" (UniqueName: \"kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.733458 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.733911 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.734198 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.742809 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4rwb\" (UniqueName: \"kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:45 crc kubenswrapper[4693]: I1122 09:21:45.790023 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:21:46 crc kubenswrapper[4693]: I1122 09:21:46.225151 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r"] Nov 22 09:21:46 crc kubenswrapper[4693]: W1122 09:21:46.225387 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7529ff10_b67c_4bd3_aa41_46de267c73f3.slice/crio-69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3 WatchSource:0}: Error finding container 69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3: Status 404 returned error can't find the container with id 69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3 Nov 22 09:21:46 crc kubenswrapper[4693]: I1122 09:21:46.630447 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" event={"ID":"7529ff10-b67c-4bd3-aa41-46de267c73f3","Type":"ContainerStarted","Data":"69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3"} Nov 22 09:21:53 crc kubenswrapper[4693]: I1122 09:21:53.677776 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" event={"ID":"7529ff10-b67c-4bd3-aa41-46de267c73f3","Type":"ContainerStarted","Data":"48aa4a49b517d38e7f789fccd12dcd6baa8e9daab73eeda2d34531ce6d1627a0"} Nov 22 09:21:53 crc kubenswrapper[4693]: I1122 09:21:53.692793 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" podStartSLOduration=1.809225278 podStartE2EDuration="8.692779425s" podCreationTimestamp="2025-11-22 09:21:45 +0000 UTC" firstStartedPulling="2025-11-22 09:21:46.22761353 +0000 UTC m=+1102.370115821" lastFinishedPulling="2025-11-22 09:21:53.111167676 +0000 UTC m=+1109.253669968" observedRunningTime="2025-11-22 09:21:53.687808046 +0000 UTC m=+1109.830310337" watchObservedRunningTime="2025-11-22 09:21:53.692779425 +0000 UTC m=+1109.835281715" Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.246699 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.247093 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.247133 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.247721 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:22:00 
crc kubenswrapper[4693]: I1122 09:22:00.247768 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf" gracePeriod=600 Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.714123 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.737453 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf" exitCode=0 Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.737485 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf"} Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.737504 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7"} Nov 22 09:22:00 crc kubenswrapper[4693]: I1122 09:22:00.737529 4693 scope.go:117] "RemoveContainer" containerID="b6cafeb0ae9a0297d68d27b5e4364409ddbf47f49b1097384068670f8ea0353f" Nov 22 09:22:01 crc kubenswrapper[4693]: I1122 09:22:01.118010 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Nov 22 09:22:04 crc kubenswrapper[4693]: I1122 09:22:04.766368 4693 generic.go:334] "Generic (PLEG): container finished" podID="7529ff10-b67c-4bd3-aa41-46de267c73f3" containerID="48aa4a49b517d38e7f789fccd12dcd6baa8e9daab73eeda2d34531ce6d1627a0" exitCode=0 Nov 22 09:22:04 crc kubenswrapper[4693]: I1122 09:22:04.766423 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" event={"ID":"7529ff10-b67c-4bd3-aa41-46de267c73f3","Type":"ContainerDied","Data":"48aa4a49b517d38e7f789fccd12dcd6baa8e9daab73eeda2d34531ce6d1627a0"} Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.069560 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.269682 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle\") pod \"7529ff10-b67c-4bd3-aa41-46de267c73f3\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.269782 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4rwb\" (UniqueName: \"kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb\") pod \"7529ff10-b67c-4bd3-aa41-46de267c73f3\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.269859 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key\") pod \"7529ff10-b67c-4bd3-aa41-46de267c73f3\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.269929 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") pod \"7529ff10-b67c-4bd3-aa41-46de267c73f3\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.275188 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "7529ff10-b67c-4bd3-aa41-46de267c73f3" (UID: "7529ff10-b67c-4bd3-aa41-46de267c73f3"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.275208 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb" (OuterVolumeSpecName: "kube-api-access-d4rwb") pod "7529ff10-b67c-4bd3-aa41-46de267c73f3" (UID: "7529ff10-b67c-4bd3-aa41-46de267c73f3"). InnerVolumeSpecName "kube-api-access-d4rwb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:22:06 crc kubenswrapper[4693]: E1122 09:22:06.288891 4693 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory podName:7529ff10-b67c-4bd3-aa41-46de267c73f3 nodeName:}" failed. No retries permitted until 2025-11-22 09:22:06.788872006 +0000 UTC m=+1122.931374297 (durationBeforeRetry 500ms). 
Error: error cleaning subPath mounts for volume "inventory" (UniqueName: "kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory") pod "7529ff10-b67c-4bd3-aa41-46de267c73f3" (UID: "7529ff10-b67c-4bd3-aa41-46de267c73f3") : error deleting /var/lib/kubelet/pods/7529ff10-b67c-4bd3-aa41-46de267c73f3/volume-subpaths: remove /var/lib/kubelet/pods/7529ff10-b67c-4bd3-aa41-46de267c73f3/volume-subpaths: no such file or directory Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.290692 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7529ff10-b67c-4bd3-aa41-46de267c73f3" (UID: "7529ff10-b67c-4bd3-aa41-46de267c73f3"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.371812 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.371855 4693 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.371867 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4rwb\" (UniqueName: \"kubernetes.io/projected/7529ff10-b67c-4bd3-aa41-46de267c73f3-kube-api-access-d4rwb\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.780455 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" event={"ID":"7529ff10-b67c-4bd3-aa41-46de267c73f3","Type":"ContainerDied","Data":"69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3"} Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.780490 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="69b8c26f82c091e22ecb68f17e520be965086e8164630defebfce1bb4a6aeef3" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.780505 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.829013 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8"] Nov 22 09:22:06 crc kubenswrapper[4693]: E1122 09:22:06.829306 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7529ff10-b67c-4bd3-aa41-46de267c73f3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.829322 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7529ff10-b67c-4bd3-aa41-46de267c73f3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.829491 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7529ff10-b67c-4bd3-aa41-46de267c73f3" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.830027 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.836114 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8"] Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.878929 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") pod \"7529ff10-b67c-4bd3-aa41-46de267c73f3\" (UID: \"7529ff10-b67c-4bd3-aa41-46de267c73f3\") " Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.879804 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.879945 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.880199 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqfgs\" (UniqueName: \"kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.883049 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory" (OuterVolumeSpecName: "inventory") pod "7529ff10-b67c-4bd3-aa41-46de267c73f3" (UID: "7529ff10-b67c-4bd3-aa41-46de267c73f3"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.981159 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.981245 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.981315 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqfgs\" (UniqueName: \"kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.981354 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7529ff10-b67c-4bd3-aa41-46de267c73f3-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.984694 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.984723 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:06 crc kubenswrapper[4693]: I1122 09:22:06.994195 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqfgs\" (UniqueName: \"kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-kblb8\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:07 crc kubenswrapper[4693]: I1122 09:22:07.143303 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:07 crc kubenswrapper[4693]: I1122 09:22:07.561574 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8"] Nov 22 09:22:07 crc kubenswrapper[4693]: W1122 09:22:07.568878 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7dd57e37_dbd8_422b_9ce9_ba054526ddd3.slice/crio-9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01 WatchSource:0}: Error finding container 9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01: Status 404 returned error can't find the container with id 9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01 Nov 22 09:22:07 crc kubenswrapper[4693]: I1122 09:22:07.571116 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:22:07 crc kubenswrapper[4693]: I1122 09:22:07.788398 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" event={"ID":"7dd57e37-dbd8-422b-9ce9-ba054526ddd3","Type":"ContainerStarted","Data":"9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01"} Nov 22 09:22:08 crc kubenswrapper[4693]: I1122 09:22:08.797273 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" event={"ID":"7dd57e37-dbd8-422b-9ce9-ba054526ddd3","Type":"ContainerStarted","Data":"3efe4b6247a4d5a6f25158991f71df4e92fa47514620cae67ee4187b4a364ab7"} Nov 22 09:22:08 crc kubenswrapper[4693]: I1122 09:22:08.815198 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" podStartSLOduration=2.324950007 podStartE2EDuration="2.815185338s" podCreationTimestamp="2025-11-22 09:22:06 +0000 UTC" firstStartedPulling="2025-11-22 09:22:07.570897457 +0000 UTC m=+1123.713399749" lastFinishedPulling="2025-11-22 09:22:08.061132788 +0000 UTC m=+1124.203635080" observedRunningTime="2025-11-22 09:22:08.808149486 +0000 UTC m=+1124.950651767" watchObservedRunningTime="2025-11-22 09:22:08.815185338 +0000 UTC m=+1124.957687629" Nov 22 09:22:10 crc kubenswrapper[4693]: I1122 09:22:10.810267 4693 generic.go:334] "Generic (PLEG): container finished" podID="7dd57e37-dbd8-422b-9ce9-ba054526ddd3" containerID="3efe4b6247a4d5a6f25158991f71df4e92fa47514620cae67ee4187b4a364ab7" exitCode=0 Nov 22 09:22:10 crc kubenswrapper[4693]: I1122 09:22:10.810350 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" event={"ID":"7dd57e37-dbd8-422b-9ce9-ba054526ddd3","Type":"ContainerDied","Data":"3efe4b6247a4d5a6f25158991f71df4e92fa47514620cae67ee4187b4a364ab7"} Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.105443 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.253749 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key\") pod \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.253853 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory\") pod \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.253893 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqfgs\" (UniqueName: \"kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs\") pod \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\" (UID: \"7dd57e37-dbd8-422b-9ce9-ba054526ddd3\") " Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.258013 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs" (OuterVolumeSpecName: "kube-api-access-bqfgs") pod "7dd57e37-dbd8-422b-9ce9-ba054526ddd3" (UID: "7dd57e37-dbd8-422b-9ce9-ba054526ddd3"). InnerVolumeSpecName "kube-api-access-bqfgs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.273868 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory" (OuterVolumeSpecName: "inventory") pod "7dd57e37-dbd8-422b-9ce9-ba054526ddd3" (UID: "7dd57e37-dbd8-422b-9ce9-ba054526ddd3"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.274436 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7dd57e37-dbd8-422b-9ce9-ba054526ddd3" (UID: "7dd57e37-dbd8-422b-9ce9-ba054526ddd3"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.355346 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqfgs\" (UniqueName: \"kubernetes.io/projected/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-kube-api-access-bqfgs\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.355371 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.355390 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7dd57e37-dbd8-422b-9ce9-ba054526ddd3-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.824795 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" event={"ID":"7dd57e37-dbd8-422b-9ce9-ba054526ddd3","Type":"ContainerDied","Data":"9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01"} Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.825017 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9617e192e7b8a075324b7d9d0929f10eab4861f20d03d129cf2e1c3f2dbacd01" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.824854 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-kblb8" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.866120 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2"] Nov 22 09:22:12 crc kubenswrapper[4693]: E1122 09:22:12.866468 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7dd57e37-dbd8-422b-9ce9-ba054526ddd3" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.866485 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7dd57e37-dbd8-422b-9ce9-ba054526ddd3" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.866695 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7dd57e37-dbd8-422b-9ce9-ba054526ddd3" containerName="redhat-edpm-deployment-openstack-edpm-ipam" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.867283 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.869427 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.869945 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.869989 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.870210 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.872741 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2"] Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.964628 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sr96\" (UniqueName: \"kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.964696 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.964722 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:12 crc kubenswrapper[4693]: I1122 09:22:12.964779 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.065988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sr96\" (UniqueName: \"kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.066063 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle\") 
pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.066091 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.066120 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.069153 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.069564 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.069801 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.079871 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sr96\" (UniqueName: \"kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.184307 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.594529 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2"] Nov 22 09:22:13 crc kubenswrapper[4693]: I1122 09:22:13.831968 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" event={"ID":"3854e608-0001-4cad-bf75-68cfa7a1486f","Type":"ContainerStarted","Data":"4a7e9c0f753796791a02483ae79e39c9cf431da211d7db7a264c56db6d34499c"} Nov 22 09:22:14 crc kubenswrapper[4693]: I1122 09:22:14.840344 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" event={"ID":"3854e608-0001-4cad-bf75-68cfa7a1486f","Type":"ContainerStarted","Data":"b727d08641e018c2d56af5ffa4ad5293104fd67a1d19275fd51090c265f08c10"} Nov 22 09:22:14 crc kubenswrapper[4693]: I1122 09:22:14.858203 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" podStartSLOduration=2.365978992 podStartE2EDuration="2.858187706s" podCreationTimestamp="2025-11-22 09:22:12 +0000 UTC" firstStartedPulling="2025-11-22 09:22:13.599256696 +0000 UTC m=+1129.741758987" lastFinishedPulling="2025-11-22 09:22:14.091465409 +0000 UTC m=+1130.233967701" observedRunningTime="2025-11-22 09:22:14.852007504 +0000 UTC m=+1130.994509794" watchObservedRunningTime="2025-11-22 09:22:14.858187706 +0000 UTC m=+1131.000689997" Nov 22 09:23:31 crc kubenswrapper[4693]: I1122 09:23:31.455926 4693 scope.go:117] "RemoveContainer" containerID="4244c9d40cece41a21ba36545023fe7fc1475dc0974bd947d29d0b99e58b000e" Nov 22 09:24:00 crc kubenswrapper[4693]: I1122 09:24:00.246837 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:24:00 crc kubenswrapper[4693]: I1122 09:24:00.247283 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:24:30 crc kubenswrapper[4693]: I1122 09:24:30.246258 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:24:30 crc kubenswrapper[4693]: I1122 09:24:30.246690 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:24:31 crc kubenswrapper[4693]: I1122 09:24:31.501689 4693 scope.go:117] "RemoveContainer" containerID="66223a546c5fee881b8313aef5aa8941b7f04f1e366528d6eea707ad71ea476d" Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.246506 4693 patch_prober.go:28] 
interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.247143 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.247179 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.247596 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.247643 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7" gracePeriod=600 Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.884403 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7" exitCode=0 Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.884470 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7"} Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.884679 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"} Nov 22 09:25:00 crc kubenswrapper[4693]: I1122 09:25:00.884697 4693 scope.go:117] "RemoveContainer" containerID="bd7cdb6f5b6113a485898440d62015f0754d51b7c39f3dbb9f5870578aac65cf" Nov 22 09:25:10 crc kubenswrapper[4693]: I1122 09:25:10.953069 4693 generic.go:334] "Generic (PLEG): container finished" podID="3854e608-0001-4cad-bf75-68cfa7a1486f" containerID="b727d08641e018c2d56af5ffa4ad5293104fd67a1d19275fd51090c265f08c10" exitCode=0 Nov 22 09:25:10 crc kubenswrapper[4693]: I1122 09:25:10.953223 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" event={"ID":"3854e608-0001-4cad-bf75-68cfa7a1486f","Type":"ContainerDied","Data":"b727d08641e018c2d56af5ffa4ad5293104fd67a1d19275fd51090c265f08c10"} Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.265028 4693 util.go:48] "No ready sandbox for pod can be found. 
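The repeating prober records are a liveness probe doing an HTTP GET against 127.0.0.1:8798/health and getting connection refused; after the failure at 09:25:00 the kubelet kills machine-config-daemon with a 600s grace period and restarts it. A sketch of a probe consistent with that behavior; the 30s period and failure threshold of 3 are inferred from the failure timestamps (09:24:00, 09:24:30, 09:25:00, then restart), not read from the manifest:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	liveness := corev1.Probe{
		ProbeHandler: corev1.ProbeHandler{
			HTTPGet: &corev1.HTTPGetAction{
				Host: "127.0.0.1", // values taken from the probe output above
				Path: "/health",
				Port: intstr.FromInt(8798),
			},
		},
		PeriodSeconds:    30, // inferred from the 30s failure cadence
		FailureThreshold: 3,  // inferred: three failures precede the restart
	}
	fmt.Printf("probe every %ds, restart after %d failures\n",
		liveness.PeriodSeconds, liveness.FailureThreshold)
}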
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.369421 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key\") pod \"3854e608-0001-4cad-bf75-68cfa7a1486f\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.369636 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4sr96\" (UniqueName: \"kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96\") pod \"3854e608-0001-4cad-bf75-68cfa7a1486f\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.369689 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory\") pod \"3854e608-0001-4cad-bf75-68cfa7a1486f\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.369748 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle\") pod \"3854e608-0001-4cad-bf75-68cfa7a1486f\" (UID: \"3854e608-0001-4cad-bf75-68cfa7a1486f\") " Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.374544 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "3854e608-0001-4cad-bf75-68cfa7a1486f" (UID: "3854e608-0001-4cad-bf75-68cfa7a1486f"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.374669 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96" (OuterVolumeSpecName: "kube-api-access-4sr96") pod "3854e608-0001-4cad-bf75-68cfa7a1486f" (UID: "3854e608-0001-4cad-bf75-68cfa7a1486f"). InnerVolumeSpecName "kube-api-access-4sr96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.392184 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3854e608-0001-4cad-bf75-68cfa7a1486f" (UID: "3854e608-0001-4cad-bf75-68cfa7a1486f"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.393235 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory" (OuterVolumeSpecName: "inventory") pod "3854e608-0001-4cad-bf75-68cfa7a1486f" (UID: "3854e608-0001-4cad-bf75-68cfa7a1486f"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.472217 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4sr96\" (UniqueName: \"kubernetes.io/projected/3854e608-0001-4cad-bf75-68cfa7a1486f-kube-api-access-4sr96\") on node \"crc\" DevicePath \"\"" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.472245 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.472257 4693 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.472265 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3854e608-0001-4cad-bf75-68cfa7a1486f-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.967273 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" event={"ID":"3854e608-0001-4cad-bf75-68cfa7a1486f","Type":"ContainerDied","Data":"4a7e9c0f753796791a02483ae79e39c9cf431da211d7db7a264c56db6d34499c"} Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.967311 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a7e9c0f753796791a02483ae79e39c9cf431da211d7db7a264c56db6d34499c" Nov 22 09:25:12 crc kubenswrapper[4693]: I1122 09:25:12.967323 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.049255 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn"] Nov 22 09:25:13 crc kubenswrapper[4693]: E1122 09:25:13.049781 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3854e608-0001-4cad-bf75-68cfa7a1486f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.049801 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="3854e608-0001-4cad-bf75-68cfa7a1486f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.050010 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="3854e608-0001-4cad-bf75-68cfa7a1486f" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.050781 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.052407 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.052558 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.052783 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.052972 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.058256 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn"] Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.081531 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.081615 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z4n2v\" (UniqueName: \"kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.081666 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.183164 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z4n2v\" (UniqueName: \"kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.183226 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.183321 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key\") pod 
\"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.187108 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.187246 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.198752 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z4n2v\" (UniqueName: \"kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.363215 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.789449 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn"] Nov 22 09:25:13 crc kubenswrapper[4693]: I1122 09:25:13.974604 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" event={"ID":"ac0f3797-4b0b-4b88-8624-95e289cf2386","Type":"ContainerStarted","Data":"d9f1e0873f954572e89e80f3e2f502a1a562e333ac06bffaf3ac79debc8cd7b8"} Nov 22 09:25:14 crc kubenswrapper[4693]: I1122 09:25:14.990406 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" event={"ID":"ac0f3797-4b0b-4b88-8624-95e289cf2386","Type":"ContainerStarted","Data":"3172aa437d6aabc8f48045913f6847db1a33a1984bdaa93b4b27c454ef19b388"} Nov 22 09:25:15 crc kubenswrapper[4693]: I1122 09:25:15.007535 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" podStartSLOduration=1.454434685 podStartE2EDuration="2.007522206s" podCreationTimestamp="2025-11-22 09:25:13 +0000 UTC" firstStartedPulling="2025-11-22 09:25:13.79888793 +0000 UTC m=+1309.941390221" lastFinishedPulling="2025-11-22 09:25:14.35197545 +0000 UTC m=+1310.494477742" observedRunningTime="2025-11-22 09:25:15.004927757 +0000 UTC m=+1311.147430048" watchObservedRunningTime="2025-11-22 09:25:15.007522206 +0000 UTC m=+1311.150024497" Nov 22 09:25:31 crc kubenswrapper[4693]: I1122 09:25:31.553418 4693 scope.go:117] "RemoveContainer" containerID="38e1d3bc7bc5a77353238d3c6c503c80f529dc66c1dfc7aec4b59ba2c6960e37" Nov 22 09:25:31 crc kubenswrapper[4693]: I1122 09:25:31.572954 4693 scope.go:117] "RemoveContainer" 
containerID="99fcc5431e24b22d409844bd2d38a9737cb2b98ac5503ccc95ebe32368a1f934" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.560085 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.572521 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.578832 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.775610 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.776229 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlgrr\" (UniqueName: \"kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.776270 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.877767 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlgrr\" (UniqueName: \"kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.877993 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.878151 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.878483 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.878528 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.894585 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zlgrr\" (UniqueName: \"kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr\") pod \"certified-operators-ff67s\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:57 crc kubenswrapper[4693]: I1122 09:25:57.896551 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:25:58 crc kubenswrapper[4693]: I1122 09:25:58.340542 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:25:59 crc kubenswrapper[4693]: I1122 09:25:59.320435 4693 generic.go:334] "Generic (PLEG): container finished" podID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerID="e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b" exitCode=0 Nov 22 09:25:59 crc kubenswrapper[4693]: I1122 09:25:59.320480 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerDied","Data":"e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b"} Nov 22 09:25:59 crc kubenswrapper[4693]: I1122 09:25:59.320670 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerStarted","Data":"b1938729233c2ce80b02a38ac888fcecbf739fe42788f8b339e19c78ce4565fb"} Nov 22 09:26:00 crc kubenswrapper[4693]: I1122 09:26:00.333274 4693 generic.go:334] "Generic (PLEG): container finished" podID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerID="c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312" exitCode=0 Nov 22 09:26:00 crc kubenswrapper[4693]: I1122 09:26:00.333365 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerDied","Data":"c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312"} Nov 22 09:26:01 crc kubenswrapper[4693]: I1122 09:26:01.346493 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerStarted","Data":"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13"} Nov 22 09:26:01 crc kubenswrapper[4693]: I1122 09:26:01.372643 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ff67s" podStartSLOduration=2.779869754 podStartE2EDuration="4.372618426s" podCreationTimestamp="2025-11-22 09:25:57 +0000 UTC" firstStartedPulling="2025-11-22 09:25:59.322218354 +0000 UTC m=+1355.464720646" lastFinishedPulling="2025-11-22 09:26:00.914967027 +0000 UTC m=+1357.057469318" observedRunningTime="2025-11-22 09:26:01.366784185 +0000 UTC m=+1357.509286477" watchObservedRunningTime="2025-11-22 09:26:01.372618426 +0000 UTC m=+1357.515120717" Nov 22 09:26:07 crc kubenswrapper[4693]: I1122 09:26:07.896998 4693 
kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:07 crc kubenswrapper[4693]: I1122 09:26:07.897527 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:07 crc kubenswrapper[4693]: I1122 09:26:07.932123 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:08 crc kubenswrapper[4693]: I1122 09:26:08.456689 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:08 crc kubenswrapper[4693]: I1122 09:26:08.497421 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:26:10 crc kubenswrapper[4693]: I1122 09:26:10.435782 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ff67s" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="registry-server" containerID="cri-o://19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13" gracePeriod=2 Nov 22 09:26:10 crc kubenswrapper[4693]: I1122 09:26:10.815955 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.015170 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities\") pod \"93201ee2-74ad-4499-822a-df5b2dca3a47\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.015213 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlgrr\" (UniqueName: \"kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr\") pod \"93201ee2-74ad-4499-822a-df5b2dca3a47\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.015267 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content\") pod \"93201ee2-74ad-4499-822a-df5b2dca3a47\" (UID: \"93201ee2-74ad-4499-822a-df5b2dca3a47\") " Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.015868 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities" (OuterVolumeSpecName: "utilities") pod "93201ee2-74ad-4499-822a-df5b2dca3a47" (UID: "93201ee2-74ad-4499-822a-df5b2dca3a47"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.020477 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr" (OuterVolumeSpecName: "kube-api-access-zlgrr") pod "93201ee2-74ad-4499-822a-df5b2dca3a47" (UID: "93201ee2-74ad-4499-822a-df5b2dca3a47"). InnerVolumeSpecName "kube-api-access-zlgrr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.049771 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "93201ee2-74ad-4499-822a-df5b2dca3a47" (UID: "93201ee2-74ad-4499-822a-df5b2dca3a47"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.116699 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.116726 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlgrr\" (UniqueName: \"kubernetes.io/projected/93201ee2-74ad-4499-822a-df5b2dca3a47-kube-api-access-zlgrr\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.116737 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/93201ee2-74ad-4499-822a-df5b2dca3a47-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.449375 4693 generic.go:334] "Generic (PLEG): container finished" podID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerID="19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13" exitCode=0 Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.449415 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ff67s" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.449440 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerDied","Data":"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13"} Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.449618 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ff67s" event={"ID":"93201ee2-74ad-4499-822a-df5b2dca3a47","Type":"ContainerDied","Data":"b1938729233c2ce80b02a38ac888fcecbf739fe42788f8b339e19c78ce4565fb"} Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.449649 4693 scope.go:117] "RemoveContainer" containerID="19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.476308 4693 scope.go:117] "RemoveContainer" containerID="c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.476977 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.483798 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ff67s"] Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.502227 4693 scope.go:117] "RemoveContainer" containerID="e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.547105 4693 scope.go:117] "RemoveContainer" containerID="19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13" Nov 22 09:26:11 crc kubenswrapper[4693]: E1122 09:26:11.547588 4693 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13\": container with ID starting with 19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13 not found: ID does not exist" containerID="19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.547627 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13"} err="failed to get container status \"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13\": rpc error: code = NotFound desc = could not find container \"19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13\": container with ID starting with 19a232b436de993bf40d420eed0f1d0881f81b606c1bf4f47711126329d41c13 not found: ID does not exist" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.547653 4693 scope.go:117] "RemoveContainer" containerID="c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312" Nov 22 09:26:11 crc kubenswrapper[4693]: E1122 09:26:11.547987 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312\": container with ID starting with c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312 not found: ID does not exist" containerID="c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.548009 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312"} err="failed to get container status \"c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312\": rpc error: code = NotFound desc = could not find container \"c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312\": container with ID starting with c07358bdb260ea26e2d3a298a1a3167e3484c6701c4fb37af85e9c3fff6f5312 not found: ID does not exist" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.548027 4693 scope.go:117] "RemoveContainer" containerID="e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b" Nov 22 09:26:11 crc kubenswrapper[4693]: E1122 09:26:11.548269 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b\": container with ID starting with e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b not found: ID does not exist" containerID="e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b" Nov 22 09:26:11 crc kubenswrapper[4693]: I1122 09:26:11.548294 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b"} err="failed to get container status \"e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b\": rpc error: code = NotFound desc = could not find container \"e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b\": container with ID starting with e97fe5e63fb563f4b2dc5ff71739ad90d337aae69129b11b6ab744d4101cdf0b not found: ID does not exist" Nov 22 09:26:12 crc kubenswrapper[4693]: I1122 09:26:12.158043 4693 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" path="/var/lib/kubelet/pods/93201ee2-74ad-4499-822a-df5b2dca3a47/volumes" Nov 22 09:26:31 crc kubenswrapper[4693]: I1122 09:26:31.638314 4693 scope.go:117] "RemoveContainer" containerID="471c170d952e8e8a7bfb755ddd1fd522ec5f22e22398fb8fe78da57465aa3325" Nov 22 09:26:31 crc kubenswrapper[4693]: I1122 09:26:31.664914 4693 scope.go:117] "RemoveContainer" containerID="c675eea055fe4db95e77e3ecb6a919ba82a7f4d09203b04c5f31fbb5a0673ab9" Nov 22 09:26:31 crc kubenswrapper[4693]: I1122 09:26:31.682050 4693 scope.go:117] "RemoveContainer" containerID="fbe9f94431ab78a52f2b6a812910ec0d64dd0d47097de627428699edfd49b70b" Nov 22 09:26:31 crc kubenswrapper[4693]: I1122 09:26:31.696263 4693 scope.go:117] "RemoveContainer" containerID="8c46b6a309214bac7baac4768b7d0027f5fe6ee991d47271b54fc3ed7b69b6a7" Nov 22 09:26:55 crc kubenswrapper[4693]: I1122 09:26:55.812568 4693 generic.go:334] "Generic (PLEG): container finished" podID="ac0f3797-4b0b-4b88-8624-95e289cf2386" containerID="3172aa437d6aabc8f48045913f6847db1a33a1984bdaa93b4b27c454ef19b388" exitCode=0 Nov 22 09:26:55 crc kubenswrapper[4693]: I1122 09:26:55.812763 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" event={"ID":"ac0f3797-4b0b-4b88-8624-95e289cf2386","Type":"ContainerDied","Data":"3172aa437d6aabc8f48045913f6847db1a33a1984bdaa93b4b27c454ef19b388"} Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.170150 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.205734 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z4n2v\" (UniqueName: \"kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v\") pod \"ac0f3797-4b0b-4b88-8624-95e289cf2386\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.205899 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key\") pod \"ac0f3797-4b0b-4b88-8624-95e289cf2386\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.206281 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory\") pod \"ac0f3797-4b0b-4b88-8624-95e289cf2386\" (UID: \"ac0f3797-4b0b-4b88-8624-95e289cf2386\") " Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.216732 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v" (OuterVolumeSpecName: "kube-api-access-z4n2v") pod "ac0f3797-4b0b-4b88-8624-95e289cf2386" (UID: "ac0f3797-4b0b-4b88-8624-95e289cf2386"). InnerVolumeSpecName "kube-api-access-z4n2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.230414 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory" (OuterVolumeSpecName: "inventory") pod "ac0f3797-4b0b-4b88-8624-95e289cf2386" (UID: "ac0f3797-4b0b-4b88-8624-95e289cf2386"). 
InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.232741 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ac0f3797-4b0b-4b88-8624-95e289cf2386" (UID: "ac0f3797-4b0b-4b88-8624-95e289cf2386"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.307955 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.307992 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z4n2v\" (UniqueName: \"kubernetes.io/projected/ac0f3797-4b0b-4b88-8624-95e289cf2386-kube-api-access-z4n2v\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.308005 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac0f3797-4b0b-4b88-8624-95e289cf2386-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.833268 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" event={"ID":"ac0f3797-4b0b-4b88-8624-95e289cf2386","Type":"ContainerDied","Data":"d9f1e0873f954572e89e80f3e2f502a1a562e333ac06bffaf3ac79debc8cd7b8"} Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.833564 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d9f1e0873f954572e89e80f3e2f502a1a562e333ac06bffaf3ac79debc8cd7b8" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.833317 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912057 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8"] Nov 22 09:26:57 crc kubenswrapper[4693]: E1122 09:26:57.912427 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="extract-content" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912447 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="extract-content" Nov 22 09:26:57 crc kubenswrapper[4693]: E1122 09:26:57.912469 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac0f3797-4b0b-4b88-8624-95e289cf2386" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912485 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac0f3797-4b0b-4b88-8624-95e289cf2386" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 09:26:57 crc kubenswrapper[4693]: E1122 09:26:57.912500 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="extract-utilities" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912507 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="extract-utilities" Nov 22 09:26:57 crc kubenswrapper[4693]: E1122 09:26:57.912516 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="registry-server" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912522 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="registry-server" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912707 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac0f3797-4b0b-4b88-8624-95e289cf2386" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.912726 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="93201ee2-74ad-4499-822a-df5b2dca3a47" containerName="registry-server" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.913299 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.918812 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.919004 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.919058 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.918892 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.919709 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8"] Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.922466 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q45v8\" (UniqueName: \"kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.922585 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:57 crc kubenswrapper[4693]: I1122 09:26:57.922642 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.023329 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q45v8\" (UniqueName: \"kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.023416 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.023485 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.027466 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.028530 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.037174 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q45v8\" (UniqueName: \"kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.231381 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.712544 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8"] Nov 22 09:26:58 crc kubenswrapper[4693]: I1122 09:26:58.846346 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" event={"ID":"a63528a4-99a1-4adc-8947-d914a74e4d8b","Type":"ContainerStarted","Data":"b0eab3eeb67ec30853e7dc0645aba4f0d21bb6788631a1358e7dff9f9a155659"} Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.033162 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-4148-account-create-gzh7s"] Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.038184 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-8w4v2"] Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.045656 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-4148-account-create-gzh7s"] Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.051270 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-8w4v2"] Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.858721 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" event={"ID":"a63528a4-99a1-4adc-8947-d914a74e4d8b","Type":"ContainerStarted","Data":"26b391a4986b83ffe9cb1aa966c0a77944370e98ab5b17063763a0d8f77fce0b"} Nov 22 09:26:59 crc kubenswrapper[4693]: I1122 09:26:59.876441 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" podStartSLOduration=2.38170603 podStartE2EDuration="2.876422743s" 
podCreationTimestamp="2025-11-22 09:26:57 +0000 UTC" firstStartedPulling="2025-11-22 09:26:58.715564382 +0000 UTC m=+1414.858066673" lastFinishedPulling="2025-11-22 09:26:59.210281095 +0000 UTC m=+1415.352783386" observedRunningTime="2025-11-22 09:26:59.874788671 +0000 UTC m=+1416.017290951" watchObservedRunningTime="2025-11-22 09:26:59.876422743 +0000 UTC m=+1416.018925034" Nov 22 09:27:00 crc kubenswrapper[4693]: I1122 09:27:00.158412 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d119d80e-d66d-4b1a-8060-063fdffe2dd3" path="/var/lib/kubelet/pods/d119d80e-d66d-4b1a-8060-063fdffe2dd3/volumes" Nov 22 09:27:00 crc kubenswrapper[4693]: I1122 09:27:00.159166 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2e9de7a-dfb1-41ca-a626-36067510f4b4" path="/var/lib/kubelet/pods/f2e9de7a-dfb1-41ca-a626-36067510f4b4/volumes" Nov 22 09:27:00 crc kubenswrapper[4693]: I1122 09:27:00.246578 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:27:00 crc kubenswrapper[4693]: I1122 09:27:00.246628 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:27:01 crc kubenswrapper[4693]: I1122 09:27:01.027751 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-f71f-account-create-hv44c"] Nov 22 09:27:01 crc kubenswrapper[4693]: I1122 09:27:01.037837 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-f71f-account-create-hv44c"] Nov 22 09:27:01 crc kubenswrapper[4693]: I1122 09:27:01.047930 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-pmkp5"] Nov 22 09:27:01 crc kubenswrapper[4693]: I1122 09:27:01.055909 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-pmkp5"] Nov 22 09:27:02 crc kubenswrapper[4693]: I1122 09:27:02.167738 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="212368f3-c21d-4470-a07f-b58004842270" path="/var/lib/kubelet/pods/212368f3-c21d-4470-a07f-b58004842270/volumes" Nov 22 09:27:02 crc kubenswrapper[4693]: I1122 09:27:02.169177 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="797b0e4c-377f-4fbb-bf27-acf705907346" path="/var/lib/kubelet/pods/797b0e4c-377f-4fbb-bf27-acf705907346/volumes" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.721490 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.731294 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.741041 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.760814 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.760880 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rchsw\" (UniqueName: \"kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.760939 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.863769 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.863888 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rchsw\" (UniqueName: \"kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.864022 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.864425 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.864564 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:04 crc kubenswrapper[4693]: I1122 09:27:04.884970 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rchsw\" (UniqueName: \"kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw\") pod \"community-operators-2gnhw\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:05 crc kubenswrapper[4693]: I1122 09:27:05.059041 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:05 crc kubenswrapper[4693]: I1122 09:27:05.535115 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:05 crc kubenswrapper[4693]: I1122 09:27:05.920570 4693 generic.go:334] "Generic (PLEG): container finished" podID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerID="0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace" exitCode=0 Nov 22 09:27:05 crc kubenswrapper[4693]: I1122 09:27:05.920618 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerDied","Data":"0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace"} Nov 22 09:27:05 crc kubenswrapper[4693]: I1122 09:27:05.920837 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerStarted","Data":"40536b2e99d2100909060041f7b6b29c2ff6ff41b8a773fcc5bcaf2383b987a4"} Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.024870 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-xbqdj"] Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.034223 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-02aa-account-create-mszg4"] Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.041026 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-xbqdj"] Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.046305 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-02aa-account-create-mszg4"] Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.156965 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bff1c168-3d8b-40e0-93c6-bb2099287437" path="/var/lib/kubelet/pods/bff1c168-3d8b-40e0-93c6-bb2099287437/volumes" Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.157671 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8989032-0fdb-4a30-969e-4654c3a97e46" path="/var/lib/kubelet/pods/e8989032-0fdb-4a30-969e-4654c3a97e46/volumes" Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.931622 4693 generic.go:334] "Generic (PLEG): container finished" podID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerID="14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18" exitCode=0 Nov 22 09:27:06 crc kubenswrapper[4693]: I1122 09:27:06.931686 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerDied","Data":"14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18"} Nov 22 09:27:07 crc kubenswrapper[4693]: I1122 09:27:07.941942 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" 
event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerStarted","Data":"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f"} Nov 22 09:27:07 crc kubenswrapper[4693]: I1122 09:27:07.960136 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-2gnhw" podStartSLOduration=2.424172548 podStartE2EDuration="3.960121191s" podCreationTimestamp="2025-11-22 09:27:04 +0000 UTC" firstStartedPulling="2025-11-22 09:27:05.923052336 +0000 UTC m=+1422.065554627" lastFinishedPulling="2025-11-22 09:27:07.459000978 +0000 UTC m=+1423.601503270" observedRunningTime="2025-11-22 09:27:07.954470787 +0000 UTC m=+1424.096973077" watchObservedRunningTime="2025-11-22 09:27:07.960121191 +0000 UTC m=+1424.102623482" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.106522 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.108809 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.118647 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.186932 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.187065 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdcdm\" (UniqueName: \"kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.187113 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.288112 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.288232 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdcdm\" (UniqueName: \"kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.288269 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.288573 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.288644 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.309873 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdcdm\" (UniqueName: \"kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm\") pod \"redhat-marketplace-ttz8t\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.426496 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.888659 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:11 crc kubenswrapper[4693]: I1122 09:27:11.979452 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerStarted","Data":"54ffa34490683bee9af66f6eb5fef265228d31e0e002059804e2c865045c9b41"} Nov 22 09:27:12 crc kubenswrapper[4693]: I1122 09:27:12.991625 4693 generic.go:334] "Generic (PLEG): container finished" podID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerID="20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3" exitCode=0 Nov 22 09:27:12 crc kubenswrapper[4693]: I1122 09:27:12.991678 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerDied","Data":"20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3"} Nov 22 09:27:12 crc kubenswrapper[4693]: I1122 09:27:12.994242 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:27:14 crc kubenswrapper[4693]: I1122 09:27:14.014379 4693 generic.go:334] "Generic (PLEG): container finished" podID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerID="6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b" exitCode=0 Nov 22 09:27:14 crc kubenswrapper[4693]: I1122 09:27:14.014575 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerDied","Data":"6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b"} Nov 22 09:27:15 crc kubenswrapper[4693]: I1122 09:27:15.026647 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" 
event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerStarted","Data":"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09"} Nov 22 09:27:15 crc kubenswrapper[4693]: I1122 09:27:15.043508 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-ttz8t" podStartSLOduration=2.541027839 podStartE2EDuration="4.043468617s" podCreationTimestamp="2025-11-22 09:27:11 +0000 UTC" firstStartedPulling="2025-11-22 09:27:12.993964299 +0000 UTC m=+1429.136466591" lastFinishedPulling="2025-11-22 09:27:14.496405078 +0000 UTC m=+1430.638907369" observedRunningTime="2025-11-22 09:27:15.040488724 +0000 UTC m=+1431.182991005" watchObservedRunningTime="2025-11-22 09:27:15.043468617 +0000 UTC m=+1431.185970908" Nov 22 09:27:15 crc kubenswrapper[4693]: I1122 09:27:15.060149 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:15 crc kubenswrapper[4693]: I1122 09:27:15.060232 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:15 crc kubenswrapper[4693]: I1122 09:27:15.097305 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:16 crc kubenswrapper[4693]: I1122 09:27:16.065715 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:17 crc kubenswrapper[4693]: I1122 09:27:17.300490 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.064520 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-2gnhw" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="registry-server" containerID="cri-o://f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f" gracePeriod=2 Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.466458 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.656766 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content\") pod \"5158a897-d4d3-4343-a70d-ddd9309c38f1\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.657273 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities\") pod \"5158a897-d4d3-4343-a70d-ddd9309c38f1\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.658207 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities" (OuterVolumeSpecName: "utilities") pod "5158a897-d4d3-4343-a70d-ddd9309c38f1" (UID: "5158a897-d4d3-4343-a70d-ddd9309c38f1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.658328 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rchsw\" (UniqueName: \"kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw\") pod \"5158a897-d4d3-4343-a70d-ddd9309c38f1\" (UID: \"5158a897-d4d3-4343-a70d-ddd9309c38f1\") " Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.659379 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.665158 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw" (OuterVolumeSpecName: "kube-api-access-rchsw") pod "5158a897-d4d3-4343-a70d-ddd9309c38f1" (UID: "5158a897-d4d3-4343-a70d-ddd9309c38f1"). InnerVolumeSpecName "kube-api-access-rchsw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.697115 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5158a897-d4d3-4343-a70d-ddd9309c38f1" (UID: "5158a897-d4d3-4343-a70d-ddd9309c38f1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.761130 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5158a897-d4d3-4343-a70d-ddd9309c38f1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:19 crc kubenswrapper[4693]: I1122 09:27:19.761156 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rchsw\" (UniqueName: \"kubernetes.io/projected/5158a897-d4d3-4343-a70d-ddd9309c38f1-kube-api-access-rchsw\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.073434 4693 generic.go:334] "Generic (PLEG): container finished" podID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerID="f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f" exitCode=0 Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.073473 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerDied","Data":"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f"} Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.073501 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-2gnhw" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.073516 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-2gnhw" event={"ID":"5158a897-d4d3-4343-a70d-ddd9309c38f1","Type":"ContainerDied","Data":"40536b2e99d2100909060041f7b6b29c2ff6ff41b8a773fcc5bcaf2383b987a4"} Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.073536 4693 scope.go:117] "RemoveContainer" containerID="f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.098838 4693 scope.go:117] "RemoveContainer" containerID="14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.101980 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.107132 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-2gnhw"] Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.128743 4693 scope.go:117] "RemoveContainer" containerID="0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.153571 4693 scope.go:117] "RemoveContainer" containerID="f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f" Nov 22 09:27:20 crc kubenswrapper[4693]: E1122 09:27:20.155086 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f\": container with ID starting with f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f not found: ID does not exist" containerID="f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.155147 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f"} err="failed to get container status \"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f\": rpc error: code = NotFound desc = could not find container \"f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f\": container with ID starting with f5f979e3b5a3617c02cf55cf098041522dc7d8f31b95cef6f9acdcde71d40d9f not found: ID does not exist" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.155184 4693 scope.go:117] "RemoveContainer" containerID="14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18" Nov 22 09:27:20 crc kubenswrapper[4693]: E1122 09:27:20.155665 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18\": container with ID starting with 14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18 not found: ID does not exist" containerID="14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.155695 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18"} err="failed to get container status \"14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18\": rpc error: code = NotFound desc = could not find 
container \"14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18\": container with ID starting with 14942ecaf7beb78b38afda86d8227f2cf30d58db38b79554ff9d70f6dddefd18 not found: ID does not exist" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.155719 4693 scope.go:117] "RemoveContainer" containerID="0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.156097 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" path="/var/lib/kubelet/pods/5158a897-d4d3-4343-a70d-ddd9309c38f1/volumes" Nov 22 09:27:20 crc kubenswrapper[4693]: E1122 09:27:20.156185 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace\": container with ID starting with 0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace not found: ID does not exist" containerID="0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace" Nov 22 09:27:20 crc kubenswrapper[4693]: I1122 09:27:20.156235 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace"} err="failed to get container status \"0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace\": rpc error: code = NotFound desc = could not find container \"0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace\": container with ID starting with 0b517d27bf51b1057b00ab8979e0580d7ee3563d52cdb92a7548b3f6fcd5aace not found: ID does not exist" Nov 22 09:27:21 crc kubenswrapper[4693]: I1122 09:27:21.427544 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:21 crc kubenswrapper[4693]: I1122 09:27:21.427968 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:21 crc kubenswrapper[4693]: I1122 09:27:21.465578 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:22 crc kubenswrapper[4693]: I1122 09:27:22.178659 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:22 crc kubenswrapper[4693]: I1122 09:27:22.694780 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.104314 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-ttz8t" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="registry-server" containerID="cri-o://af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09" gracePeriod=2 Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.495970 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.547967 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdcdm\" (UniqueName: \"kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm\") pod \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.548013 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content\") pod \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.548308 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities\") pod \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\" (UID: \"823b7db0-0a89-4740-98a0-a91fb5d5fec9\") " Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.549057 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities" (OuterVolumeSpecName: "utilities") pod "823b7db0-0a89-4740-98a0-a91fb5d5fec9" (UID: "823b7db0-0a89-4740-98a0-a91fb5d5fec9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.553325 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm" (OuterVolumeSpecName: "kube-api-access-kdcdm") pod "823b7db0-0a89-4740-98a0-a91fb5d5fec9" (UID: "823b7db0-0a89-4740-98a0-a91fb5d5fec9"). InnerVolumeSpecName "kube-api-access-kdcdm". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.561261 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "823b7db0-0a89-4740-98a0-a91fb5d5fec9" (UID: "823b7db0-0a89-4740-98a0-a91fb5d5fec9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.649352 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdcdm\" (UniqueName: \"kubernetes.io/projected/823b7db0-0a89-4740-98a0-a91fb5d5fec9-kube-api-access-kdcdm\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.649382 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:24 crc kubenswrapper[4693]: I1122 09:27:24.649395 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/823b7db0-0a89-4740-98a0-a91fb5d5fec9-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.118027 4693 generic.go:334] "Generic (PLEG): container finished" podID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerID="af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09" exitCode=0 Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.118572 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerDied","Data":"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09"} Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.118626 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-ttz8t" event={"ID":"823b7db0-0a89-4740-98a0-a91fb5d5fec9","Type":"ContainerDied","Data":"54ffa34490683bee9af66f6eb5fef265228d31e0e002059804e2c865045c9b41"} Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.118656 4693 scope.go:117] "RemoveContainer" containerID="af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.119133 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-ttz8t" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.152198 4693 scope.go:117] "RemoveContainer" containerID="6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.158972 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.164975 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-ttz8t"] Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.170422 4693 scope.go:117] "RemoveContainer" containerID="20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.204994 4693 scope.go:117] "RemoveContainer" containerID="af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09" Nov 22 09:27:25 crc kubenswrapper[4693]: E1122 09:27:25.205395 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09\": container with ID starting with af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09 not found: ID does not exist" containerID="af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.205425 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09"} err="failed to get container status \"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09\": rpc error: code = NotFound desc = could not find container \"af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09\": container with ID starting with af28ff1305c4665baf8d7716991434c2bf52b7f55cfd1493ff13d558add8fc09 not found: ID does not exist" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.205448 4693 scope.go:117] "RemoveContainer" containerID="6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b" Nov 22 09:27:25 crc kubenswrapper[4693]: E1122 09:27:25.205668 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b\": container with ID starting with 6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b not found: ID does not exist" containerID="6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.205688 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b"} err="failed to get container status \"6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b\": rpc error: code = NotFound desc = could not find container \"6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b\": container with ID starting with 6f404d7fde9d7e4959c3edaa563fda61106f95e48cdb0196cb95fb485d5ad33b not found: ID does not exist" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.205700 4693 scope.go:117] "RemoveContainer" containerID="20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3" Nov 22 09:27:25 crc kubenswrapper[4693]: E1122 09:27:25.205910 4693 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3\": container with ID starting with 20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3 not found: ID does not exist" containerID="20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3" Nov 22 09:27:25 crc kubenswrapper[4693]: I1122 09:27:25.205932 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3"} err="failed to get container status \"20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3\": rpc error: code = NotFound desc = could not find container \"20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3\": container with ID starting with 20d76089fa3e5aac444ba37720fe54b953eccf0cfdbce2ad16a4f3d69e3692e3 not found: ID does not exist" Nov 22 09:27:26 crc kubenswrapper[4693]: I1122 09:27:26.154694 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" path="/var/lib/kubelet/pods/823b7db0-0a89-4740-98a0-a91fb5d5fec9/volumes" Nov 22 09:27:27 crc kubenswrapper[4693]: I1122 09:27:27.030603 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-5sz6k"] Nov 22 09:27:27 crc kubenswrapper[4693]: I1122 09:27:27.036360 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-5sz6k"] Nov 22 09:27:28 crc kubenswrapper[4693]: I1122 09:27:28.154383 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b948e6-0096-4708-9bd3-74a2a8d7cc37" path="/var/lib/kubelet/pods/78b948e6-0096-4708-9bd3-74a2a8d7cc37/volumes" Nov 22 09:27:30 crc kubenswrapper[4693]: I1122 09:27:30.246413 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:27:30 crc kubenswrapper[4693]: I1122 09:27:30.247133 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.748464 4693 scope.go:117] "RemoveContainer" containerID="54099bcd3b274e8d9621fb589a28e09e3fb000ef65b4c9218e97b02e6729895b" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.768314 4693 scope.go:117] "RemoveContainer" containerID="c8544bb38558e53ec3128880bcfdee6e543c086bada3a7146db8b2c0434c2462" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.801707 4693 scope.go:117] "RemoveContainer" containerID="dcc21f302db42beb74cc74bed7105f34ae65d435733ce906850f298da6cdd021" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.835140 4693 scope.go:117] "RemoveContainer" containerID="caa061c5aebce046bbf4bb08476d710369531f72a30c521f730750c190ae77b0" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.860125 4693 scope.go:117] "RemoveContainer" containerID="fa9499b404d2eec2239d43a6eca74d26c3746b0142792bc4b9f8d19eabc3f4ba" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.897828 4693 scope.go:117] "RemoveContainer" 
containerID="b50c384d824b3605d696d09b4899d4315c63d85f21804c7a743d1317b05c7afd" Nov 22 09:27:31 crc kubenswrapper[4693]: I1122 09:27:31.925213 4693 scope.go:117] "RemoveContainer" containerID="d4096b5605a401f44590daafd2f56035c2a447456a8950d00d67a74236b1b4c6" Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.026138 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-kc2bz"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.034227 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-2170-account-create-t2wl8"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.042942 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-7bss2"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.048580 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-mpg6l"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.053346 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-b5a6-account-create-gb6vh"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.057870 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-kc2bz"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.062534 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-7bss2"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.067096 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-b5a6-account-create-gb6vh"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.072643 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-2170-account-create-t2wl8"] Nov 22 09:27:37 crc kubenswrapper[4693]: I1122 09:27:37.079077 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-mpg6l"] Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.021008 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-178d-account-create-swm6h"] Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.026031 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-178d-account-create-swm6h"] Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.159750 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b7bc687-9339-4ddd-a589-a23bb9880872" path="/var/lib/kubelet/pods/4b7bc687-9339-4ddd-a589-a23bb9880872/volumes" Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.160802 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8c63c124-8c2a-4bde-8a4d-50441a10ba02" path="/var/lib/kubelet/pods/8c63c124-8c2a-4bde-8a4d-50441a10ba02/volumes" Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.161327 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="956e2855-d931-4003-b10c-ecb7712d4793" path="/var/lib/kubelet/pods/956e2855-d931-4003-b10c-ecb7712d4793/volumes" Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.161832 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207" path="/var/lib/kubelet/pods/9ec2c337-0eb0-4bc4-88b0-c4df4a5e1207/volumes" Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.162759 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0410ad7-7eac-4d62-96e3-4dfa4c718f9e" path="/var/lib/kubelet/pods/c0410ad7-7eac-4d62-96e3-4dfa4c718f9e/volumes" Nov 22 09:27:38 crc kubenswrapper[4693]: I1122 09:27:38.163285 4693 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d7451501-ca2e-42fc-884b-fc0ca2fa393e" path="/var/lib/kubelet/pods/d7451501-ca2e-42fc-884b-fc0ca2fa393e/volumes" Nov 22 09:27:44 crc kubenswrapper[4693]: I1122 09:27:44.034037 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-c6rqt"] Nov 22 09:27:44 crc kubenswrapper[4693]: I1122 09:27:44.041804 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-c6rqt"] Nov 22 09:27:44 crc kubenswrapper[4693]: I1122 09:27:44.158338 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0917b65-1f5c-4c04-8189-a070d8409c64" path="/var/lib/kubelet/pods/f0917b65-1f5c-4c04-8189-a070d8409c64/volumes" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.026715 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-ccc76"] Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.033081 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-ccc76"] Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.159454 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="977876f3-91a6-4f81-9c62-b7d5fc9b1508" path="/var/lib/kubelet/pods/977876f3-91a6-4f81-9c62-b7d5fc9b1508/volumes" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.246498 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.246579 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.246635 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.247423 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.247494 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" gracePeriod=600 Nov 22 09:28:00 crc kubenswrapper[4693]: E1122 09:28:00.367434 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" 
podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.419912 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" exitCode=0 Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.419962 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"} Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.420001 4693 scope.go:117] "RemoveContainer" containerID="e06c149dfb076b55ef9480cee63c49d1071530a9b01584f0d3bd6318d2df2ea7" Nov 22 09:28:00 crc kubenswrapper[4693]: I1122 09:28:00.420440 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:28:00 crc kubenswrapper[4693]: E1122 09:28:00.420706 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.582766 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583707 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="extract-utilities" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583721 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="extract-utilities" Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583731 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="extract-content" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583736 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="extract-content" Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583744 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583749 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583760 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583766 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583780 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="extract-content" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583785 4693 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="extract-content" Nov 22 09:28:04 crc kubenswrapper[4693]: E1122 09:28:04.583806 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="extract-utilities" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583811 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="extract-utilities" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.583996 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="5158a897-d4d3-4343-a70d-ddd9309c38f1" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.584008 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="823b7db0-0a89-4740-98a0-a91fb5d5fec9" containerName="registry-server" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.585274 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.598230 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.692701 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.692735 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.693454 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sp5z7\" (UniqueName: \"kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.795174 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.795230 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.795332 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sp5z7\" (UniqueName: \"kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7\") pod 
\"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.795595 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.795616 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.815706 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sp5z7\" (UniqueName: \"kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7\") pod \"redhat-operators-k7rwx\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:04 crc kubenswrapper[4693]: I1122 09:28:04.903338 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:05 crc kubenswrapper[4693]: I1122 09:28:05.334506 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:05 crc kubenswrapper[4693]: I1122 09:28:05.480387 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerStarted","Data":"843ec159a5f747435229dc419f8b569f2a7cc8b812bbd981f91dae844734890a"} Nov 22 09:28:05 crc kubenswrapper[4693]: E1122 09:28:05.670877 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17fd32b3_9726_4e58_8464_65eb2361058c.slice/crio-62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod17fd32b3_9726_4e58_8464_65eb2361058c.slice/crio-conmon-62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5.scope\": RecentStats: unable to find data in memory cache]" Nov 22 09:28:06 crc kubenswrapper[4693]: I1122 09:28:06.494730 4693 generic.go:334] "Generic (PLEG): container finished" podID="17fd32b3-9726-4e58-8464-65eb2361058c" containerID="62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5" exitCode=0 Nov 22 09:28:06 crc kubenswrapper[4693]: I1122 09:28:06.495054 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerDied","Data":"62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5"} Nov 22 09:28:07 crc kubenswrapper[4693]: I1122 09:28:07.502555 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerStarted","Data":"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825"} Nov 22 09:28:08 crc kubenswrapper[4693]: 
I1122 09:28:08.521738 4693 generic.go:334] "Generic (PLEG): container finished" podID="17fd32b3-9726-4e58-8464-65eb2361058c" containerID="f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825" exitCode=0 Nov 22 09:28:08 crc kubenswrapper[4693]: I1122 09:28:08.521881 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerDied","Data":"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825"} Nov 22 09:28:08 crc kubenswrapper[4693]: I1122 09:28:08.526442 4693 generic.go:334] "Generic (PLEG): container finished" podID="a63528a4-99a1-4adc-8947-d914a74e4d8b" containerID="26b391a4986b83ffe9cb1aa966c0a77944370e98ab5b17063763a0d8f77fce0b" exitCode=0 Nov 22 09:28:08 crc kubenswrapper[4693]: I1122 09:28:08.526483 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" event={"ID":"a63528a4-99a1-4adc-8947-d914a74e4d8b","Type":"ContainerDied","Data":"26b391a4986b83ffe9cb1aa966c0a77944370e98ab5b17063763a0d8f77fce0b"} Nov 22 09:28:09 crc kubenswrapper[4693]: I1122 09:28:09.536550 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerStarted","Data":"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d"} Nov 22 09:28:09 crc kubenswrapper[4693]: I1122 09:28:09.559689 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-k7rwx" podStartSLOduration=3.029669319 podStartE2EDuration="5.559654542s" podCreationTimestamp="2025-11-22 09:28:04 +0000 UTC" firstStartedPulling="2025-11-22 09:28:06.497750356 +0000 UTC m=+1482.640252647" lastFinishedPulling="2025-11-22 09:28:09.027735578 +0000 UTC m=+1485.170237870" observedRunningTime="2025-11-22 09:28:09.553661684 +0000 UTC m=+1485.696163975" watchObservedRunningTime="2025-11-22 09:28:09.559654542 +0000 UTC m=+1485.702156833" Nov 22 09:28:09 crc kubenswrapper[4693]: I1122 09:28:09.881913 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.080988 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory\") pod \"a63528a4-99a1-4adc-8947-d914a74e4d8b\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.081263 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q45v8\" (UniqueName: \"kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8\") pod \"a63528a4-99a1-4adc-8947-d914a74e4d8b\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.081411 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key\") pod \"a63528a4-99a1-4adc-8947-d914a74e4d8b\" (UID: \"a63528a4-99a1-4adc-8947-d914a74e4d8b\") " Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.086639 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8" (OuterVolumeSpecName: "kube-api-access-q45v8") pod "a63528a4-99a1-4adc-8947-d914a74e4d8b" (UID: "a63528a4-99a1-4adc-8947-d914a74e4d8b"). InnerVolumeSpecName "kube-api-access-q45v8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.102748 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory" (OuterVolumeSpecName: "inventory") pod "a63528a4-99a1-4adc-8947-d914a74e4d8b" (UID: "a63528a4-99a1-4adc-8947-d914a74e4d8b"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.107536 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a63528a4-99a1-4adc-8947-d914a74e4d8b" (UID: "a63528a4-99a1-4adc-8947-d914a74e4d8b"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.183730 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.183946 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q45v8\" (UniqueName: \"kubernetes.io/projected/a63528a4-99a1-4adc-8947-d914a74e4d8b-kube-api-access-q45v8\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.183958 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a63528a4-99a1-4adc-8947-d914a74e4d8b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.548800 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" event={"ID":"a63528a4-99a1-4adc-8947-d914a74e4d8b","Type":"ContainerDied","Data":"b0eab3eeb67ec30853e7dc0645aba4f0d21bb6788631a1358e7dff9f9a155659"} Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.548869 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.548889 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b0eab3eeb67ec30853e7dc0645aba4f0d21bb6788631a1358e7dff9f9a155659" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.622914 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c"] Nov 22 09:28:10 crc kubenswrapper[4693]: E1122 09:28:10.623352 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a63528a4-99a1-4adc-8947-d914a74e4d8b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.623372 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a63528a4-99a1-4adc-8947-d914a74e4d8b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.623583 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a63528a4-99a1-4adc-8947-d914a74e4d8b" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.624317 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.626863 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.626886 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.627208 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.629088 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.640058 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c"] Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.691424 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zxv6g\" (UniqueName: \"kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.691681 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.691886 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.793594 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.793659 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zxv6g\" (UniqueName: \"kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.793798 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.798039 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.799017 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.809038 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zxv6g\" (UniqueName: \"kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-dr42c\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:10 crc kubenswrapper[4693]: I1122 09:28:10.938370 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.055783 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-fhrhl"] Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.065566 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-fhrhl"] Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.070870 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-hvt8s"] Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.082439 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-hvt8s"] Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.417019 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c"] Nov 22 09:28:11 crc kubenswrapper[4693]: I1122 09:28:11.566267 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" event={"ID":"a19ea96b-8910-41e1-a8c6-901206473d72","Type":"ContainerStarted","Data":"64391d6ae4937b344f1603411d063b96496700280189e4d15f35c709a41961bd"} Nov 22 09:28:12 crc kubenswrapper[4693]: I1122 09:28:12.159923 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c37ecde5-29e1-4377-95be-2b7da9e65110" path="/var/lib/kubelet/pods/c37ecde5-29e1-4377-95be-2b7da9e65110/volumes" Nov 22 09:28:12 crc kubenswrapper[4693]: I1122 09:28:12.160521 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dacf4def-2cb8-4e5b-8761-e3a45a4a89d9" path="/var/lib/kubelet/pods/dacf4def-2cb8-4e5b-8761-e3a45a4a89d9/volumes" Nov 22 09:28:12 crc kubenswrapper[4693]: I1122 09:28:12.588171 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" event={"ID":"a19ea96b-8910-41e1-a8c6-901206473d72","Type":"ContainerStarted","Data":"c4e8ece365a0c4009a48ae83b6f5057abe9006fb83927fcbc563db98eaf5fc4f"} Nov 22 09:28:12 crc kubenswrapper[4693]: I1122 09:28:12.605080 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" podStartSLOduration=2.10824141 podStartE2EDuration="2.605061618s" podCreationTimestamp="2025-11-22 09:28:10 +0000 UTC" firstStartedPulling="2025-11-22 09:28:11.41942681 +0000 UTC m=+1487.561929091" lastFinishedPulling="2025-11-22 09:28:11.916247007 +0000 UTC m=+1488.058749299" observedRunningTime="2025-11-22 09:28:12.601506081 +0000 UTC m=+1488.744008373" watchObservedRunningTime="2025-11-22 09:28:12.605061618 +0000 UTC m=+1488.747563908" Nov 22 09:28:13 crc kubenswrapper[4693]: I1122 09:28:13.023183 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-k75fh"] Nov 22 09:28:13 crc kubenswrapper[4693]: I1122 09:28:13.032514 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-k75fh"] Nov 22 09:28:13 crc kubenswrapper[4693]: I1122 09:28:13.148091 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:28:13 crc kubenswrapper[4693]: E1122 09:28:13.148531 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:28:14 crc kubenswrapper[4693]: I1122 09:28:14.158110 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5651530-e21c-44f7-9e9c-3187b4fcd3c6" path="/var/lib/kubelet/pods/b5651530-e21c-44f7-9e9c-3187b4fcd3c6/volumes" Nov 22 09:28:14 crc kubenswrapper[4693]: I1122 09:28:14.904140 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:14 crc kubenswrapper[4693]: I1122 09:28:14.904261 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:14 crc kubenswrapper[4693]: I1122 09:28:14.940648 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:15 crc kubenswrapper[4693]: I1122 09:28:15.613191 4693 generic.go:334] "Generic (PLEG): container finished" podID="a19ea96b-8910-41e1-a8c6-901206473d72" containerID="c4e8ece365a0c4009a48ae83b6f5057abe9006fb83927fcbc563db98eaf5fc4f" exitCode=0 Nov 22 09:28:15 crc kubenswrapper[4693]: I1122 09:28:15.613274 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" event={"ID":"a19ea96b-8910-41e1-a8c6-901206473d72","Type":"ContainerDied","Data":"c4e8ece365a0c4009a48ae83b6f5057abe9006fb83927fcbc563db98eaf5fc4f"} Nov 22 09:28:15 crc kubenswrapper[4693]: I1122 09:28:15.650227 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:15 crc kubenswrapper[4693]: I1122 09:28:15.683149 4693 kubelet.go:2437] 
"SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:16 crc kubenswrapper[4693]: I1122 09:28:16.930402 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.025567 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key\") pod \"a19ea96b-8910-41e1-a8c6-901206473d72\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.025679 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory\") pod \"a19ea96b-8910-41e1-a8c6-901206473d72\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.026358 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zxv6g\" (UniqueName: \"kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g\") pod \"a19ea96b-8910-41e1-a8c6-901206473d72\" (UID: \"a19ea96b-8910-41e1-a8c6-901206473d72\") " Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.030940 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g" (OuterVolumeSpecName: "kube-api-access-zxv6g") pod "a19ea96b-8910-41e1-a8c6-901206473d72" (UID: "a19ea96b-8910-41e1-a8c6-901206473d72"). InnerVolumeSpecName "kube-api-access-zxv6g". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.047744 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a19ea96b-8910-41e1-a8c6-901206473d72" (UID: "a19ea96b-8910-41e1-a8c6-901206473d72"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.051427 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory" (OuterVolumeSpecName: "inventory") pod "a19ea96b-8910-41e1-a8c6-901206473d72" (UID: "a19ea96b-8910-41e1-a8c6-901206473d72"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.128685 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.128721 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a19ea96b-8910-41e1-a8c6-901206473d72-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.128734 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zxv6g\" (UniqueName: \"kubernetes.io/projected/a19ea96b-8910-41e1-a8c6-901206473d72-kube-api-access-zxv6g\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.629973 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" event={"ID":"a19ea96b-8910-41e1-a8c6-901206473d72","Type":"ContainerDied","Data":"64391d6ae4937b344f1603411d063b96496700280189e4d15f35c709a41961bd"} Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.630025 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-dr42c" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.630125 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-k7rwx" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="registry-server" containerID="cri-o://b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d" gracePeriod=2 Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.630329 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64391d6ae4937b344f1603411d063b96496700280189e4d15f35c709a41961bd" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.685178 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp"] Nov 22 09:28:17 crc kubenswrapper[4693]: E1122 09:28:17.685561 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a19ea96b-8910-41e1-a8c6-901206473d72" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.685580 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="a19ea96b-8910-41e1-a8c6-901206473d72" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.685774 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="a19ea96b-8910-41e1-a8c6-901206473d72" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.686394 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.688343 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.688503 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.688730 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.689255 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.695723 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp"] Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.843053 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.843115 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-84xwj\" (UniqueName: \"kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.843243 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.945205 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.945349 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.945380 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-84xwj\" (UniqueName: \"kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: 
\"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.950602 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.952619 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:17 crc kubenswrapper[4693]: I1122 09:28:17.958131 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-84xwj\" (UniqueName: \"kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-lwtlp\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.020439 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.074188 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.147809 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities\") pod \"17fd32b3-9726-4e58-8464-65eb2361058c\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.147956 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sp5z7\" (UniqueName: \"kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7\") pod \"17fd32b3-9726-4e58-8464-65eb2361058c\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.148004 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content\") pod \"17fd32b3-9726-4e58-8464-65eb2361058c\" (UID: \"17fd32b3-9726-4e58-8464-65eb2361058c\") " Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.148720 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities" (OuterVolumeSpecName: "utilities") pod "17fd32b3-9726-4e58-8464-65eb2361058c" (UID: "17fd32b3-9726-4e58-8464-65eb2361058c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.152775 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7" (OuterVolumeSpecName: "kube-api-access-sp5z7") pod "17fd32b3-9726-4e58-8464-65eb2361058c" (UID: "17fd32b3-9726-4e58-8464-65eb2361058c"). InnerVolumeSpecName "kube-api-access-sp5z7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.165808 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sp5z7\" (UniqueName: \"kubernetes.io/projected/17fd32b3-9726-4e58-8464-65eb2361058c-kube-api-access-sp5z7\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.165833 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.207270 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "17fd32b3-9726-4e58-8464-65eb2361058c" (UID: "17fd32b3-9726-4e58-8464-65eb2361058c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.267344 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/17fd32b3-9726-4e58-8464-65eb2361058c-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.503633 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp"] Nov 22 09:28:18 crc kubenswrapper[4693]: W1122 09:28:18.509019 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2a20394f_6ab1_4de1_aa64_df5a655364bb.slice/crio-29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058 WatchSource:0}: Error finding container 29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058: Status 404 returned error can't find the container with id 29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058 Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.642671 4693 generic.go:334] "Generic (PLEG): container finished" podID="17fd32b3-9726-4e58-8464-65eb2361058c" containerID="b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d" exitCode=0 Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.642745 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-k7rwx" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.642734 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerDied","Data":"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d"} Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.642832 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-k7rwx" event={"ID":"17fd32b3-9726-4e58-8464-65eb2361058c","Type":"ContainerDied","Data":"843ec159a5f747435229dc419f8b569f2a7cc8b812bbd981f91dae844734890a"} Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.642875 4693 scope.go:117] "RemoveContainer" containerID="b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.644948 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" event={"ID":"2a20394f-6ab1-4de1-aa64-df5a655364bb","Type":"ContainerStarted","Data":"29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058"} Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.663795 4693 scope.go:117] "RemoveContainer" containerID="f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.684931 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.687874 4693 scope.go:117] "RemoveContainer" containerID="62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.691583 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-k7rwx"] Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.703592 4693 scope.go:117] "RemoveContainer" containerID="b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d" Nov 22 09:28:18 crc kubenswrapper[4693]: E1122 09:28:18.704003 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d\": container with ID starting with b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d not found: ID does not exist" containerID="b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.704037 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d"} err="failed to get container status \"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d\": rpc error: code = NotFound desc = could not find container \"b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d\": container with ID starting with b7e9cc0edf932d0597a73be9475c502f328038bc3c39ce10c526aac141492d8d not found: ID does not exist" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.704066 4693 scope.go:117] "RemoveContainer" containerID="f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825" Nov 22 09:28:18 crc kubenswrapper[4693]: E1122 09:28:18.704507 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825\": container with ID starting with f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825 not found: ID does not exist" containerID="f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.704558 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825"} err="failed to get container status \"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825\": rpc error: code = NotFound desc = could not find container \"f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825\": container with ID starting with f31030527847fa648c782a5010d1341aceaf6054864ae0c549e27ac392647825 not found: ID does not exist" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.704595 4693 scope.go:117] "RemoveContainer" containerID="62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5" Nov 22 09:28:18 crc kubenswrapper[4693]: E1122 09:28:18.704934 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5\": container with ID starting with 62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5 not found: ID does not exist" containerID="62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5" Nov 22 09:28:18 crc kubenswrapper[4693]: I1122 09:28:18.704988 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5"} err="failed to get container status \"62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5\": rpc error: code = NotFound desc = could not find container \"62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5\": container with ID starting with 62b8f9eab1d3701d000be3b02247194354d5c20c0cd9d7c2ef8c9f1c0d7bb3f5 not found: ID does not exist" Nov 22 09:28:19 crc kubenswrapper[4693]: I1122 09:28:19.654399 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" event={"ID":"2a20394f-6ab1-4de1-aa64-df5a655364bb","Type":"ContainerStarted","Data":"b194ddca938906545f1031eaf249884a6a94a7019f179c6e7e7b0c549facfa8b"} Nov 22 09:28:19 crc kubenswrapper[4693]: I1122 09:28:19.670626 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" podStartSLOduration=2.19937176 podStartE2EDuration="2.670610847s" podCreationTimestamp="2025-11-22 09:28:17 +0000 UTC" firstStartedPulling="2025-11-22 09:28:18.511428328 +0000 UTC m=+1494.653930620" lastFinishedPulling="2025-11-22 09:28:18.982667416 +0000 UTC m=+1495.125169707" observedRunningTime="2025-11-22 09:28:19.668026759 +0000 UTC m=+1495.810529050" watchObservedRunningTime="2025-11-22 09:28:19.670610847 +0000 UTC m=+1495.813113138" Nov 22 09:28:20 crc kubenswrapper[4693]: I1122 09:28:20.157860 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" path="/var/lib/kubelet/pods/17fd32b3-9726-4e58-8464-65eb2361058c/volumes" Nov 22 09:28:26 crc kubenswrapper[4693]: I1122 09:28:26.147331 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:28:26 crc 
kubenswrapper[4693]: E1122 09:28:26.148177 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:28:30 crc kubenswrapper[4693]: I1122 09:28:30.022990 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-qjqt5"] Nov 22 09:28:30 crc kubenswrapper[4693]: I1122 09:28:30.028165 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-qjqt5"] Nov 22 09:28:30 crc kubenswrapper[4693]: I1122 09:28:30.156422 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="be72ccb6-821d-4b5a-a4d0-6d866fc617f6" path="/var/lib/kubelet/pods/be72ccb6-821d-4b5a-a4d0-6d866fc617f6/volumes" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.081196 4693 scope.go:117] "RemoveContainer" containerID="8c1ad42526a5bb5229fb0809b8cef54e966ce4364be9da95831d425eddee6a26" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.106832 4693 scope.go:117] "RemoveContainer" containerID="943305a5ec8698210df9f8229165fca8538eb6c11e6514a7cbd4be236ea344a1" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.142013 4693 scope.go:117] "RemoveContainer" containerID="48d7f1b9410084beed5c4bfde3daa82178a1f319ffe68f5bb6aa4229e8c76d7d" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.171420 4693 scope.go:117] "RemoveContainer" containerID="47a89b32958565c3f16483178caa6a9d530c2af9b2095036fcc883f93922d363" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.199152 4693 scope.go:117] "RemoveContainer" containerID="2d028d413186bba2df087c41e44cfb7465dc826d0fa34308f732a16b3953bb35" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.246243 4693 scope.go:117] "RemoveContainer" containerID="2042e6179f43d538bc458776d10523f2ea26f48ae58965477d95716c30533a5f" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.269027 4693 scope.go:117] "RemoveContainer" containerID="ac54efd1cae29da2ce5c5ce4eddff8bd6b7ea698aef39cd58a921cf312509c04" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.301735 4693 scope.go:117] "RemoveContainer" containerID="b2e002a07cf5bf2d7444ec4de3b8a5bf0e08b258e98f53ba30a9e12168868073" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.343725 4693 scope.go:117] "RemoveContainer" containerID="e6701e1c9e385cbf112da03e925b1750253f22899795ac22018c98e6baf1d609" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.365275 4693 scope.go:117] "RemoveContainer" containerID="ef7fe073b128c6c52974d54cc3ff49bf804bf338d34a988f4f6f08bedd803563" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.384649 4693 scope.go:117] "RemoveContainer" containerID="f4b4a89a35e7854bde49334ddf1b4372005107a04cd01768c7ab9a094aa10292" Nov 22 09:28:32 crc kubenswrapper[4693]: I1122 09:28:32.407376 4693 scope.go:117] "RemoveContainer" containerID="6d46ebf25a2e46264e2d6cd4dd05ef59d57cebd5c50e003f481b4af24693e4e4" Nov 22 09:28:37 crc kubenswrapper[4693]: I1122 09:28:37.146963 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:28:37 crc kubenswrapper[4693]: E1122 09:28:37.147538 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with 
CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:28:44 crc kubenswrapper[4693]: I1122 09:28:44.881362 4693 generic.go:334] "Generic (PLEG): container finished" podID="2a20394f-6ab1-4de1-aa64-df5a655364bb" containerID="b194ddca938906545f1031eaf249884a6a94a7019f179c6e7e7b0c549facfa8b" exitCode=0 Nov 22 09:28:44 crc kubenswrapper[4693]: I1122 09:28:44.881448 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" event={"ID":"2a20394f-6ab1-4de1-aa64-df5a655364bb","Type":"ContainerDied","Data":"b194ddca938906545f1031eaf249884a6a94a7019f179c6e7e7b0c549facfa8b"} Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.191111 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.292571 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key\") pod \"2a20394f-6ab1-4de1-aa64-df5a655364bb\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.292701 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-84xwj\" (UniqueName: \"kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj\") pod \"2a20394f-6ab1-4de1-aa64-df5a655364bb\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.293875 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory\") pod \"2a20394f-6ab1-4de1-aa64-df5a655364bb\" (UID: \"2a20394f-6ab1-4de1-aa64-df5a655364bb\") " Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.298284 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj" (OuterVolumeSpecName: "kube-api-access-84xwj") pod "2a20394f-6ab1-4de1-aa64-df5a655364bb" (UID: "2a20394f-6ab1-4de1-aa64-df5a655364bb"). InnerVolumeSpecName "kube-api-access-84xwj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.314063 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2a20394f-6ab1-4de1-aa64-df5a655364bb" (UID: "2a20394f-6ab1-4de1-aa64-df5a655364bb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.314695 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory" (OuterVolumeSpecName: "inventory") pod "2a20394f-6ab1-4de1-aa64-df5a655364bb" (UID: "2a20394f-6ab1-4de1-aa64-df5a655364bb"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.395668 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-84xwj\" (UniqueName: \"kubernetes.io/projected/2a20394f-6ab1-4de1-aa64-df5a655364bb-kube-api-access-84xwj\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.395699 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.395710 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2a20394f-6ab1-4de1-aa64-df5a655364bb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.895387 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" event={"ID":"2a20394f-6ab1-4de1-aa64-df5a655364bb","Type":"ContainerDied","Data":"29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058"} Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.895679 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="29ed9505beb16d86578fd01e10078f7833b2b557856622bdd2730779014e6058" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.895447 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-lwtlp" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.966281 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7"] Nov 22 09:28:46 crc kubenswrapper[4693]: E1122 09:28:46.966959 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="extract-utilities" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967033 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="extract-utilities" Nov 22 09:28:46 crc kubenswrapper[4693]: E1122 09:28:46.967124 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="registry-server" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967169 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="registry-server" Nov 22 09:28:46 crc kubenswrapper[4693]: E1122 09:28:46.967217 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="extract-content" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967264 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="extract-content" Nov 22 09:28:46 crc kubenswrapper[4693]: E1122 09:28:46.967305 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2a20394f-6ab1-4de1-aa64-df5a655364bb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967351 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="2a20394f-6ab1-4de1-aa64-df5a655364bb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967637 4693 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="17fd32b3-9726-4e58-8464-65eb2361058c" containerName="registry-server" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.967694 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="2a20394f-6ab1-4de1-aa64-df5a655364bb" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.968491 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.970869 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.971052 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.971273 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.973395 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:28:46 crc kubenswrapper[4693]: I1122 09:28:46.982375 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7"] Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.005240 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.005292 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.005439 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfg5d\" (UniqueName: \"kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.107717 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfg5d\" (UniqueName: \"kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.107827 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key\") pod 
\"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.107888 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.113130 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.113134 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.122166 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfg5d\" (UniqueName: \"kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.281490 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.696773 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7"] Nov 22 09:28:47 crc kubenswrapper[4693]: I1122 09:28:47.903262 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" event={"ID":"7bd4f766-0c89-4aed-9f9a-15657c6e1efa","Type":"ContainerStarted","Data":"529acfa155fa9666467f37048974da27409c14ed6ed6238ff0a072244c728435"} Nov 22 09:28:48 crc kubenswrapper[4693]: I1122 09:28:48.910959 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" event={"ID":"7bd4f766-0c89-4aed-9f9a-15657c6e1efa","Type":"ContainerStarted","Data":"2ab4d19b41a210e11dbfcba786c692b4aa9f2875cddbc2c40f0551f196533b03"} Nov 22 09:28:48 crc kubenswrapper[4693]: I1122 09:28:48.924364 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" podStartSLOduration=2.457497566 podStartE2EDuration="2.924345749s" podCreationTimestamp="2025-11-22 09:28:46 +0000 UTC" firstStartedPulling="2025-11-22 09:28:47.703164425 +0000 UTC m=+1523.845666715" lastFinishedPulling="2025-11-22 09:28:48.170012607 +0000 UTC m=+1524.312514898" observedRunningTime="2025-11-22 09:28:48.922760769 +0000 UTC m=+1525.065263060" watchObservedRunningTime="2025-11-22 09:28:48.924345749 +0000 UTC m=+1525.066848040" Nov 22 09:28:51 crc kubenswrapper[4693]: I1122 09:28:51.147521 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:28:51 crc kubenswrapper[4693]: E1122 09:28:51.148016 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.035748 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-dc3e-account-create-tplp4"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.046416 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-a227-account-create-zx7zl"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.051133 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-dc3e-account-create-tplp4"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.057563 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-a227-account-create-zx7zl"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.063800 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-269rq"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.069997 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-lm5mh"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.074410 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-2bbd-account-create-phtjw"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.078764 4693 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-hcwm4"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.083120 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-269rq"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.087429 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-lm5mh"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.091957 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-hcwm4"] Nov 22 09:29:03 crc kubenswrapper[4693]: I1122 09:29:03.096179 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-2bbd-account-create-phtjw"] Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.158748 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e7e12bd-2fc6-445e-96bd-092859596ae4" path="/var/lib/kubelet/pods/0e7e12bd-2fc6-445e-96bd-092859596ae4/volumes" Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.159446 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="81fa2494-ad98-4781-8a71-e518b21b6509" path="/var/lib/kubelet/pods/81fa2494-ad98-4781-8a71-e518b21b6509/volumes" Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.160081 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9753c17-d077-44bf-a381-e3dd7e4aa505" path="/var/lib/kubelet/pods/a9753c17-d077-44bf-a381-e3dd7e4aa505/volumes" Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.160657 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6f9b111-6452-4462-9e5e-fe38c86e823b" path="/var/lib/kubelet/pods/b6f9b111-6452-4462-9e5e-fe38c86e823b/volumes" Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.161620 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c6f93f55-4ccc-49d4-b377-5fe5c859dd38" path="/var/lib/kubelet/pods/c6f93f55-4ccc-49d4-b377-5fe5c859dd38/volumes" Nov 22 09:29:04 crc kubenswrapper[4693]: I1122 09:29:04.162312 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eeb9054c-f2a0-4e1a-b95f-210fca359716" path="/var/lib/kubelet/pods/eeb9054c-f2a0-4e1a-b95f-210fca359716/volumes" Nov 22 09:29:05 crc kubenswrapper[4693]: I1122 09:29:05.146223 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:29:05 crc kubenswrapper[4693]: E1122 09:29:05.146587 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:29:19 crc kubenswrapper[4693]: I1122 09:29:19.147608 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:29:19 crc kubenswrapper[4693]: E1122 09:29:19.149334 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" 
podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:29:22 crc kubenswrapper[4693]: I1122 09:29:22.176149 4693 generic.go:334] "Generic (PLEG): container finished" podID="7bd4f766-0c89-4aed-9f9a-15657c6e1efa" containerID="2ab4d19b41a210e11dbfcba786c692b4aa9f2875cddbc2c40f0551f196533b03" exitCode=0 Nov 22 09:29:22 crc kubenswrapper[4693]: I1122 09:29:22.176206 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" event={"ID":"7bd4f766-0c89-4aed-9f9a-15657c6e1efa","Type":"ContainerDied","Data":"2ab4d19b41a210e11dbfcba786c692b4aa9f2875cddbc2c40f0551f196533b03"} Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.499433 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.574683 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory\") pod \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.574794 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key\") pod \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.574828 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfg5d\" (UniqueName: \"kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d\") pod \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\" (UID: \"7bd4f766-0c89-4aed-9f9a-15657c6e1efa\") " Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.579896 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d" (OuterVolumeSpecName: "kube-api-access-pfg5d") pod "7bd4f766-0c89-4aed-9f9a-15657c6e1efa" (UID: "7bd4f766-0c89-4aed-9f9a-15657c6e1efa"). InnerVolumeSpecName "kube-api-access-pfg5d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.596374 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory" (OuterVolumeSpecName: "inventory") pod "7bd4f766-0c89-4aed-9f9a-15657c6e1efa" (UID: "7bd4f766-0c89-4aed-9f9a-15657c6e1efa"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.596463 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7bd4f766-0c89-4aed-9f9a-15657c6e1efa" (UID: "7bd4f766-0c89-4aed-9f9a-15657c6e1efa"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.677305 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.677331 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:23 crc kubenswrapper[4693]: I1122 09:29:23.677344 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfg5d\" (UniqueName: \"kubernetes.io/projected/7bd4f766-0c89-4aed-9f9a-15657c6e1efa-kube-api-access-pfg5d\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.032311 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8g5n7"] Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.037393 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-8g5n7"] Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.154011 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8877d22-10d7-4304-8edc-7b0cfbf7f6c7" path="/var/lib/kubelet/pods/d8877d22-10d7-4304-8edc-7b0cfbf7f6c7/volumes" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.189609 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" event={"ID":"7bd4f766-0c89-4aed-9f9a-15657c6e1efa","Type":"ContainerDied","Data":"529acfa155fa9666467f37048974da27409c14ed6ed6238ff0a072244c728435"} Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.189643 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="529acfa155fa9666467f37048974da27409c14ed6ed6238ff0a072244c728435" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.189691 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.246381 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fd2vc"] Nov 22 09:29:24 crc kubenswrapper[4693]: E1122 09:29:24.246718 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7bd4f766-0c89-4aed-9f9a-15657c6e1efa" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.246737 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="7bd4f766-0c89-4aed-9f9a-15657c6e1efa" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.246933 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="7bd4f766-0c89-4aed-9f9a-15657c6e1efa" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.247478 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.249342 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.249374 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.249567 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.250116 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.261256 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fd2vc"] Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.285455 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.285588 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.285818 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvhrq\" (UniqueName: \"kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.387769 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.387895 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.387997 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvhrq\" (UniqueName: \"kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc 
kubenswrapper[4693]: I1122 09:29:24.393639 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.394338 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.403243 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvhrq\" (UniqueName: \"kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq\") pod \"ssh-known-hosts-edpm-deployment-fd2vc\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") " pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:24 crc kubenswrapper[4693]: I1122 09:29:24.565465 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:25 crc kubenswrapper[4693]: I1122 09:29:25.001142 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-fd2vc"] Nov 22 09:29:25 crc kubenswrapper[4693]: I1122 09:29:25.197374 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" event={"ID":"12096b4e-7f75-4fcb-be29-331232a1e64b","Type":"ContainerStarted","Data":"bcfe02c344d02586ed886d3a347327b6c011156c1d528159fd256991b279035f"} Nov 22 09:29:26 crc kubenswrapper[4693]: I1122 09:29:26.204146 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" event={"ID":"12096b4e-7f75-4fcb-be29-331232a1e64b","Type":"ContainerStarted","Data":"22e4b44cfd04b05f621f6fea536cb59ff315836eaf66c4f05976ae4d249999f5"} Nov 22 09:29:26 crc kubenswrapper[4693]: I1122 09:29:26.223246 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" podStartSLOduration=1.6010293789999999 podStartE2EDuration="2.223223243s" podCreationTimestamp="2025-11-22 09:29:24 +0000 UTC" firstStartedPulling="2025-11-22 09:29:25.018072491 +0000 UTC m=+1561.160574782" lastFinishedPulling="2025-11-22 09:29:25.640266355 +0000 UTC m=+1561.782768646" observedRunningTime="2025-11-22 09:29:26.216430731 +0000 UTC m=+1562.358933022" watchObservedRunningTime="2025-11-22 09:29:26.223223243 +0000 UTC m=+1562.365725534" Nov 22 09:29:31 crc kubenswrapper[4693]: I1122 09:29:31.146737 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:29:31 crc kubenswrapper[4693]: E1122 09:29:31.148265 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:29:31 crc 
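
The recurring "back-off 5m0s restarting failed container" errors above are kubelet's crash-loop backoff holding machine-config-daemon-scx6r at its maximum restart delay. Only the 5m cap is visible in these messages; the 10s initial delay and per-crash doubling in the sketch below are assumed from the upstream kubelet defaults, not from anything in this log:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Assumed upstream defaults: the restart delay doubles per crash and
	// is capped; only the cap ("back-off 5m0s") appears in the log itself.
	delay := 10 * time.Second
	const maxDelay = 5 * time.Minute
	for restart := 1; restart <= 7; restart++ {
		fmt.Printf("restart %d: back-off %s\n", restart, delay)
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay // machine-config-daemon-scx6r is pinned here
		}
	}
}

time.Duration's String method renders the cap as "5m0s", which is exactly the form quoted in the pod_workers.go error above.
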
Nov 22 09:29:31 crc kubenswrapper[4693]: I1122 09:29:31.237509 4693 generic.go:334] "Generic (PLEG): container finished" podID="12096b4e-7f75-4fcb-be29-331232a1e64b" containerID="22e4b44cfd04b05f621f6fea536cb59ff315836eaf66c4f05976ae4d249999f5" exitCode=0
Nov 22 09:29:31 crc kubenswrapper[4693]: I1122 09:29:31.237555 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" event={"ID":"12096b4e-7f75-4fcb-be29-331232a1e64b","Type":"ContainerDied","Data":"22e4b44cfd04b05f621f6fea536cb59ff315836eaf66c4f05976ae4d249999f5"}
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.548921 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.601140 4693 scope.go:117] "RemoveContainer" containerID="d3d99b046f44ee66d016c53e6c4d3e99fd158b053d16d82f13f10d781b80ed7c"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.620388 4693 scope.go:117] "RemoveContainer" containerID="c69d165f5c92a795d067c3eadc5f9d8f4fa0cf6bc95e8140e5f54fe920bb3be1"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.663001 4693 scope.go:117] "RemoveContainer" containerID="d57ee3b2f84c37a6926ed4183effbada0363b5c94eccacadd3007d2801f9fb3b"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.684598 4693 scope.go:117] "RemoveContainer" containerID="53e25c4a4e828a177a7d68eecd24c0bec484459f2be4ade2f284123a29496550"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.714536 4693 scope.go:117] "RemoveContainer" containerID="61a34cd6187c8d44e38b9f52f5df1af81419259841c18643e21ca3cbdeb86cf9"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.722478 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvhrq\" (UniqueName: \"kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq\") pod \"12096b4e-7f75-4fcb-be29-331232a1e64b\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") "
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.722635 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0\") pod \"12096b4e-7f75-4fcb-be29-331232a1e64b\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") "
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.722666 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam\") pod \"12096b4e-7f75-4fcb-be29-331232a1e64b\" (UID: \"12096b4e-7f75-4fcb-be29-331232a1e64b\") "
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.727981 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq" (OuterVolumeSpecName: "kube-api-access-nvhrq") pod "12096b4e-7f75-4fcb-be29-331232a1e64b" (UID: "12096b4e-7f75-4fcb-be29-331232a1e64b"). InnerVolumeSpecName "kube-api-access-nvhrq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.730481 4693 scope.go:117] "RemoveContainer" containerID="eea75b40711b7b68512f654e7b7a0c0e8239c4d95d9aee8f21af98cd4fffb2d2"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.746818 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "12096b4e-7f75-4fcb-be29-331232a1e64b" (UID: "12096b4e-7f75-4fcb-be29-331232a1e64b"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.747084 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "12096b4e-7f75-4fcb-be29-331232a1e64b" (UID: "12096b4e-7f75-4fcb-be29-331232a1e64b"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.747408 4693 scope.go:117] "RemoveContainer" containerID="3f48aa8689853c6727c5c10ae7db5dba6c46700d9df2d0fa9387b002de53823e"
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.824740 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvhrq\" (UniqueName: \"kubernetes.io/projected/12096b4e-7f75-4fcb-be29-331232a1e64b-kube-api-access-nvhrq\") on node \"crc\" DevicePath \"\""
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.824767 4693 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-inventory-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:29:32 crc kubenswrapper[4693]: I1122 09:29:32.824777 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/12096b4e-7f75-4fcb-be29-331232a1e64b-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.261902 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" event={"ID":"12096b4e-7f75-4fcb-be29-331232a1e64b","Type":"ContainerDied","Data":"bcfe02c344d02586ed886d3a347327b6c011156c1d528159fd256991b279035f"}
Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.261947 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcfe02c344d02586ed886d3a347327b6c011156c1d528159fd256991b279035f"
Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-fd2vc" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.304584 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk"] Nov 22 09:29:33 crc kubenswrapper[4693]: E1122 09:29:33.305428 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12096b4e-7f75-4fcb-be29-331232a1e64b" containerName="ssh-known-hosts-edpm-deployment" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.305460 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="12096b4e-7f75-4fcb-be29-331232a1e64b" containerName="ssh-known-hosts-edpm-deployment" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.305808 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="12096b4e-7f75-4fcb-be29-331232a1e64b" containerName="ssh-known-hosts-edpm-deployment" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.307002 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.308586 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.308744 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.308990 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.311319 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.312332 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk"] Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.433490 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jmsg\" (UniqueName: \"kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.433537 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.433886 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.534985 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jmsg\" (UniqueName: 
\"kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.535019 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.535051 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.539347 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.539353 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.548142 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jmsg\" (UniqueName: \"kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-lxlhk\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:33 crc kubenswrapper[4693]: I1122 09:29:33.620138 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:34 crc kubenswrapper[4693]: I1122 09:29:34.066623 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk"] Nov 22 09:29:34 crc kubenswrapper[4693]: I1122 09:29:34.273319 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" event={"ID":"da9ee799-029a-485c-a5b6-bbdc64697c71","Type":"ContainerStarted","Data":"78aa9c8c72034d071df83782be9b3c7e96cb0768a8be6584531474810d74f930"} Nov 22 09:29:35 crc kubenswrapper[4693]: I1122 09:29:35.283336 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" event={"ID":"da9ee799-029a-485c-a5b6-bbdc64697c71","Type":"ContainerStarted","Data":"d7fac8cccebabc647f8f95695bce6d9ca52d9d18eff6509376a2933029b207c2"} Nov 22 09:29:35 crc kubenswrapper[4693]: I1122 09:29:35.298455 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" podStartSLOduration=1.781116237 podStartE2EDuration="2.298440547s" podCreationTimestamp="2025-11-22 09:29:33 +0000 UTC" firstStartedPulling="2025-11-22 09:29:34.07425269 +0000 UTC m=+1570.216754982" lastFinishedPulling="2025-11-22 09:29:34.591577001 +0000 UTC m=+1570.734079292" observedRunningTime="2025-11-22 09:29:35.296434545 +0000 UTC m=+1571.438936837" watchObservedRunningTime="2025-11-22 09:29:35.298440547 +0000 UTC m=+1571.440942838" Nov 22 09:29:40 crc kubenswrapper[4693]: I1122 09:29:40.335538 4693 generic.go:334] "Generic (PLEG): container finished" podID="da9ee799-029a-485c-a5b6-bbdc64697c71" containerID="d7fac8cccebabc647f8f95695bce6d9ca52d9d18eff6509376a2933029b207c2" exitCode=0 Nov 22 09:29:40 crc kubenswrapper[4693]: I1122 09:29:40.335605 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" event={"ID":"da9ee799-029a-485c-a5b6-bbdc64697c71","Type":"ContainerDied","Data":"d7fac8cccebabc647f8f95695bce6d9ca52d9d18eff6509376a2933029b207c2"} Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.657745 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.775915 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key\") pod \"da9ee799-029a-485c-a5b6-bbdc64697c71\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.776220 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory\") pod \"da9ee799-029a-485c-a5b6-bbdc64697c71\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.776366 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jmsg\" (UniqueName: \"kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg\") pod \"da9ee799-029a-485c-a5b6-bbdc64697c71\" (UID: \"da9ee799-029a-485c-a5b6-bbdc64697c71\") " Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.781649 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg" (OuterVolumeSpecName: "kube-api-access-6jmsg") pod "da9ee799-029a-485c-a5b6-bbdc64697c71" (UID: "da9ee799-029a-485c-a5b6-bbdc64697c71"). InnerVolumeSpecName "kube-api-access-6jmsg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.799445 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "da9ee799-029a-485c-a5b6-bbdc64697c71" (UID: "da9ee799-029a-485c-a5b6-bbdc64697c71"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.802499 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory" (OuterVolumeSpecName: "inventory") pod "da9ee799-029a-485c-a5b6-bbdc64697c71" (UID: "da9ee799-029a-485c-a5b6-bbdc64697c71"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.878930 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jmsg\" (UniqueName: \"kubernetes.io/projected/da9ee799-029a-485c-a5b6-bbdc64697c71-kube-api-access-6jmsg\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.878961 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:41 crc kubenswrapper[4693]: I1122 09:29:41.878971 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/da9ee799-029a-485c-a5b6-bbdc64697c71-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.351860 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" event={"ID":"da9ee799-029a-485c-a5b6-bbdc64697c71","Type":"ContainerDied","Data":"78aa9c8c72034d071df83782be9b3c7e96cb0768a8be6584531474810d74f930"} Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.351885 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-lxlhk" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.351894 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="78aa9c8c72034d071df83782be9b3c7e96cb0768a8be6584531474810d74f930" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.406516 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t"] Nov 22 09:29:42 crc kubenswrapper[4693]: E1122 09:29:42.407225 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da9ee799-029a-485c-a5b6-bbdc64697c71" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.407243 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="da9ee799-029a-485c-a5b6-bbdc64697c71" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.407429 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="da9ee799-029a-485c-a5b6-bbdc64697c71" containerName="run-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.408131 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.409595 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.409863 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.409980 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.410101 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.412892 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t"] Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.487720 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f26xd\" (UniqueName: \"kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.487958 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.487984 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.589546 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f26xd\" (UniqueName: \"kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.589682 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.589704 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: 
\"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.592888 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.592895 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.604127 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f26xd\" (UniqueName: \"kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:42 crc kubenswrapper[4693]: I1122 09:29:42.719868 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:43 crc kubenswrapper[4693]: I1122 09:29:43.026033 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tg5lr"] Nov 22 09:29:43 crc kubenswrapper[4693]: I1122 09:29:43.031315 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-tg5lr"] Nov 22 09:29:43 crc kubenswrapper[4693]: I1122 09:29:43.138570 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t"] Nov 22 09:29:43 crc kubenswrapper[4693]: I1122 09:29:43.360063 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" event={"ID":"b32380c7-b430-47aa-8694-054df2442f2b","Type":"ContainerStarted","Data":"0932daba972520204258ba9d6f19f3d1c1b6e0c7e2c6d0c30e9127e25a77df8b"} Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.018892 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-ljh2g"] Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.023811 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-ljh2g"] Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.155145 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.155417 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="310ec6d0-8daa-42c9-ac3e-96f791743124" path="/var/lib/kubelet/pods/310ec6d0-8daa-42c9-ac3e-96f791743124/volumes" Nov 22 09:29:44 crc kubenswrapper[4693]: E1122 09:29:44.155797 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.156379 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7c78d850-6c37-4f3b-920c-d64e4c136e47" path="/var/lib/kubelet/pods/7c78d850-6c37-4f3b-920c-d64e4c136e47/volumes" Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.367669 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" event={"ID":"b32380c7-b430-47aa-8694-054df2442f2b","Type":"ContainerStarted","Data":"e053c4598c415c237d7bce5538e6bc68eca80e2091c919bb666f52af5ad03b13"} Nov 22 09:29:44 crc kubenswrapper[4693]: I1122 09:29:44.386865 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" podStartSLOduration=1.927783016 podStartE2EDuration="2.386832944s" podCreationTimestamp="2025-11-22 09:29:42 +0000 UTC" firstStartedPulling="2025-11-22 09:29:43.147568239 +0000 UTC m=+1579.290070530" lastFinishedPulling="2025-11-22 09:29:43.606618168 +0000 UTC m=+1579.749120458" observedRunningTime="2025-11-22 09:29:44.378534651 +0000 UTC m=+1580.521036942" watchObservedRunningTime="2025-11-22 09:29:44.386832944 +0000 UTC m=+1580.529335235" Nov 22 09:29:50 crc kubenswrapper[4693]: I1122 09:29:50.409974 4693 generic.go:334] "Generic (PLEG): container finished" podID="b32380c7-b430-47aa-8694-054df2442f2b" containerID="e053c4598c415c237d7bce5538e6bc68eca80e2091c919bb666f52af5ad03b13" exitCode=0 Nov 22 09:29:50 crc kubenswrapper[4693]: I1122 09:29:50.410047 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" event={"ID":"b32380c7-b430-47aa-8694-054df2442f2b","Type":"ContainerDied","Data":"e053c4598c415c237d7bce5538e6bc68eca80e2091c919bb666f52af5ad03b13"} Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.730751 4693 util.go:48] "No ready sandbox for pod can be found. 
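
The generic.go:334 "container finished" lines and the kubelet.go:2453 SyncLoop lines above are the pod lifecycle event generator (PLEG) relaying container state changes from the runtime into the kubelet's sync loop; each serialized event carries the pod UID, an event type, and a container ID. A struct mirroring what the log prints, as an illustration only and not the kubelet's actual type definitions:

package main

import "fmt"

// Shape of the events in lines like
//   event={"ID":"b32380c7-...","Type":"ContainerDied","Data":"e053c..."}
type PodLifecycleEvent struct {
	ID   string      // pod UID
	Type string      // ContainerStarted, ContainerDied, ...
	Data interface{} // container ID for the two event types seen here
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "b32380c7-b430-47aa-8694-054df2442f2b",
		Type: "ContainerDied",
		Data: "e053c4598c415c237d7bce5538e6bc68eca80e2091c919bb666f52af5ad03b13",
	}
	fmt.Printf("SyncLoop (PLEG): event for pod %s: %s %v\n", ev.ID, ev.Type, ev.Data)
}
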
Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.849182 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f26xd\" (UniqueName: \"kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd\") pod \"b32380c7-b430-47aa-8694-054df2442f2b\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.849253 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key\") pod \"b32380c7-b430-47aa-8694-054df2442f2b\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.849458 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory\") pod \"b32380c7-b430-47aa-8694-054df2442f2b\" (UID: \"b32380c7-b430-47aa-8694-054df2442f2b\") " Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.854711 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd" (OuterVolumeSpecName: "kube-api-access-f26xd") pod "b32380c7-b430-47aa-8694-054df2442f2b" (UID: "b32380c7-b430-47aa-8694-054df2442f2b"). InnerVolumeSpecName "kube-api-access-f26xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.871567 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b32380c7-b430-47aa-8694-054df2442f2b" (UID: "b32380c7-b430-47aa-8694-054df2442f2b"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.871865 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory" (OuterVolumeSpecName: "inventory") pod "b32380c7-b430-47aa-8694-054df2442f2b" (UID: "b32380c7-b430-47aa-8694-054df2442f2b"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.951549 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.951587 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b32380c7-b430-47aa-8694-054df2442f2b-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:51 crc kubenswrapper[4693]: I1122 09:29:51.951602 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f26xd\" (UniqueName: \"kubernetes.io/projected/b32380c7-b430-47aa-8694-054df2442f2b-kube-api-access-f26xd\") on node \"crc\" DevicePath \"\"" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.423703 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" event={"ID":"b32380c7-b430-47aa-8694-054df2442f2b","Type":"ContainerDied","Data":"0932daba972520204258ba9d6f19f3d1c1b6e0c7e2c6d0c30e9127e25a77df8b"} Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.424021 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0932daba972520204258ba9d6f19f3d1c1b6e0c7e2c6d0c30e9127e25a77df8b" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.423761 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.469916 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5"] Nov 22 09:29:52 crc kubenswrapper[4693]: E1122 09:29:52.470276 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b32380c7-b430-47aa-8694-054df2442f2b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.470294 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="b32380c7-b430-47aa-8694-054df2442f2b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.470477 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="b32380c7-b430-47aa-8694-054df2442f2b" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.471164 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.475412 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.475416 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.475539 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.476686 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.476813 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.479943 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5"] Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.484271 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.484429 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.484617 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562471 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562610 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562670 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mlvct\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562706 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562735 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562771 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562795 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562917 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562951 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.562976 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.563039 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.563074 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.563098 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.563121 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.663916 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.663957 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mlvct\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.663988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664014 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664044 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664063 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664123 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664194 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664226 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664255 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664273 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664292 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key\") pod 
\"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664311 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.664343 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.668041 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.668209 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.668325 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.668336 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.668460 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.669497 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.669554 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.669673 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.670337 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.670635 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.670764 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.671220 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.671875 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " 
pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.677750 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mlvct\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:52 crc kubenswrapper[4693]: I1122 09:29:52.788761 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:29:53 crc kubenswrapper[4693]: I1122 09:29:53.196466 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5"] Nov 22 09:29:53 crc kubenswrapper[4693]: I1122 09:29:53.435354 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" event={"ID":"118d3e39-2ce7-4a88-b9c7-869e5e83a568","Type":"ContainerStarted","Data":"8593f5aadd9829cab8c619cc6b96dcb51727c567fd38a8067e36cc04e8366bef"} Nov 22 09:29:54 crc kubenswrapper[4693]: I1122 09:29:54.443047 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" event={"ID":"118d3e39-2ce7-4a88-b9c7-869e5e83a568","Type":"ContainerStarted","Data":"7e09dfa793550b7aac7f6f863dc6f9c2a0e320bca843cde04fe7d652efffc96b"} Nov 22 09:29:54 crc kubenswrapper[4693]: I1122 09:29:54.465671 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" podStartSLOduration=2.016526175 podStartE2EDuration="2.465656793s" podCreationTimestamp="2025-11-22 09:29:52 +0000 UTC" firstStartedPulling="2025-11-22 09:29:53.202089372 +0000 UTC m=+1589.344591663" lastFinishedPulling="2025-11-22 09:29:53.65121999 +0000 UTC m=+1589.793722281" observedRunningTime="2025-11-22 09:29:54.460033921 +0000 UTC m=+1590.602536212" watchObservedRunningTime="2025-11-22 09:29:54.465656793 +0000 UTC m=+1590.608159084" Nov 22 09:29:57 crc kubenswrapper[4693]: I1122 09:29:57.147093 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:29:57 crc kubenswrapper[4693]: E1122 09:29:57.147579 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.126716 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp"] Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.128682 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.130927 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.131448 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.133227 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp"] Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.277539 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.277726 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.277917 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s9xwg\" (UniqueName: \"kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.379638 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.379902 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.380064 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s9xwg\" (UniqueName: \"kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.380410 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume\") pod 
\"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.386566 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.395431 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s9xwg\" (UniqueName: \"kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg\") pod \"collect-profiles-29396730-7vnxp\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.446948 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:00 crc kubenswrapper[4693]: I1122 09:30:00.813464 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp"] Nov 22 09:30:00 crc kubenswrapper[4693]: W1122 09:30:00.816881 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod456ac8dc_fa47_4a74_91f0_2143b3e21238.slice/crio-c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4 WatchSource:0}: Error finding container c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4: Status 404 returned error can't find the container with id c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4 Nov 22 09:30:01 crc kubenswrapper[4693]: I1122 09:30:01.484613 4693 generic.go:334] "Generic (PLEG): container finished" podID="456ac8dc-fa47-4a74-91f0-2143b3e21238" containerID="6de074bc4aad1050540250252d14c834408367d231a0c9d8cd7b93fa027275c7" exitCode=0 Nov 22 09:30:01 crc kubenswrapper[4693]: I1122 09:30:01.484681 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" event={"ID":"456ac8dc-fa47-4a74-91f0-2143b3e21238","Type":"ContainerDied","Data":"6de074bc4aad1050540250252d14c834408367d231a0c9d8cd7b93fa027275c7"} Nov 22 09:30:01 crc kubenswrapper[4693]: I1122 09:30:01.484895 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" event={"ID":"456ac8dc-fa47-4a74-91f0-2143b3e21238","Type":"ContainerStarted","Data":"c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4"} Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.733105 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.921721 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume\") pod \"456ac8dc-fa47-4a74-91f0-2143b3e21238\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.922114 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume\") pod \"456ac8dc-fa47-4a74-91f0-2143b3e21238\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.922147 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s9xwg\" (UniqueName: \"kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg\") pod \"456ac8dc-fa47-4a74-91f0-2143b3e21238\" (UID: \"456ac8dc-fa47-4a74-91f0-2143b3e21238\") " Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.922578 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume" (OuterVolumeSpecName: "config-volume") pod "456ac8dc-fa47-4a74-91f0-2143b3e21238" (UID: "456ac8dc-fa47-4a74-91f0-2143b3e21238"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.922983 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/456ac8dc-fa47-4a74-91f0-2143b3e21238-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.927796 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg" (OuterVolumeSpecName: "kube-api-access-s9xwg") pod "456ac8dc-fa47-4a74-91f0-2143b3e21238" (UID: "456ac8dc-fa47-4a74-91f0-2143b3e21238"). InnerVolumeSpecName "kube-api-access-s9xwg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:02 crc kubenswrapper[4693]: I1122 09:30:02.927945 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "456ac8dc-fa47-4a74-91f0-2143b3e21238" (UID: "456ac8dc-fa47-4a74-91f0-2143b3e21238"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:03 crc kubenswrapper[4693]: I1122 09:30:03.024952 4693 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/456ac8dc-fa47-4a74-91f0-2143b3e21238-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:03 crc kubenswrapper[4693]: I1122 09:30:03.024978 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s9xwg\" (UniqueName: \"kubernetes.io/projected/456ac8dc-fa47-4a74-91f0-2143b3e21238-kube-api-access-s9xwg\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:03 crc kubenswrapper[4693]: I1122 09:30:03.499073 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" event={"ID":"456ac8dc-fa47-4a74-91f0-2143b3e21238","Type":"ContainerDied","Data":"c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4"} Nov 22 09:30:03 crc kubenswrapper[4693]: I1122 09:30:03.499116 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c468ed80d3ee2e2bce1f17c30a154a4fd610bf1777e9771fcba3a7e2890436f4" Nov 22 09:30:03 crc kubenswrapper[4693]: I1122 09:30:03.499165 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396730-7vnxp" Nov 22 09:30:10 crc kubenswrapper[4693]: I1122 09:30:10.146576 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:30:10 crc kubenswrapper[4693]: E1122 09:30:10.147425 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:30:19 crc kubenswrapper[4693]: I1122 09:30:19.621420 4693 generic.go:334] "Generic (PLEG): container finished" podID="118d3e39-2ce7-4a88-b9c7-869e5e83a568" containerID="7e09dfa793550b7aac7f6f863dc6f9c2a0e320bca843cde04fe7d652efffc96b" exitCode=0 Nov 22 09:30:19 crc kubenswrapper[4693]: I1122 09:30:19.621507 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" event={"ID":"118d3e39-2ce7-4a88-b9c7-869e5e83a568","Type":"ContainerDied","Data":"7e09dfa793550b7aac7f6f863dc6f9c2a0e320bca843cde04fe7d652efffc96b"} Nov 22 09:30:20 crc kubenswrapper[4693]: I1122 09:30:20.958556 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055497 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055550 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055596 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055664 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055732 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055768 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055875 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055901 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055924 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc 
kubenswrapper[4693]: I1122 09:30:21.055941 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.055998 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.056034 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.056073 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.056120 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mlvct\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct\") pod \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\" (UID: \"118d3e39-2ce7-4a88-b9c7-869e5e83a568\") " Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.061724 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.062290 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.062318 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.063124 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.063534 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct" (OuterVolumeSpecName: "kube-api-access-mlvct") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "kube-api-access-mlvct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.064224 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.064281 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.065672 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.065780 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.065791 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.066058 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.066894 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.082127 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory" (OuterVolumeSpecName: "inventory") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.082525 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "118d3e39-2ce7-4a88-b9c7-869e5e83a568" (UID: "118d3e39-2ce7-4a88-b9c7-869e5e83a568"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159427 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159590 4693 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159653 4693 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159706 4693 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159763 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159815 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159908 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.159974 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160024 4693 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160072 4693 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160125 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160175 4693 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160225 4693 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/118d3e39-2ce7-4a88-b9c7-869e5e83a568-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.160285 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mlvct\" (UniqueName: \"kubernetes.io/projected/118d3e39-2ce7-4a88-b9c7-869e5e83a568-kube-api-access-mlvct\") on node \"crc\" DevicePath \"\"" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.637028 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" event={"ID":"118d3e39-2ce7-4a88-b9c7-869e5e83a568","Type":"ContainerDied","Data":"8593f5aadd9829cab8c619cc6b96dcb51727c567fd38a8067e36cc04e8366bef"} Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.637078 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8593f5aadd9829cab8c619cc6b96dcb51727c567fd38a8067e36cc04e8366bef" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.637077 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.735955 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj"] Nov 22 09:30:21 crc kubenswrapper[4693]: E1122 09:30:21.736755 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="456ac8dc-fa47-4a74-91f0-2143b3e21238" containerName="collect-profiles" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.736792 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="456ac8dc-fa47-4a74-91f0-2143b3e21238" containerName="collect-profiles" Nov 22 09:30:21 crc kubenswrapper[4693]: E1122 09:30:21.736884 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="118d3e39-2ce7-4a88-b9c7-869e5e83a568" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.736893 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="118d3e39-2ce7-4a88-b9c7-869e5e83a568" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.737144 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="118d3e39-2ce7-4a88-b9c7-869e5e83a568" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.737177 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="456ac8dc-fa47-4a74-91f0-2143b3e21238" containerName="collect-profiles" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.738295 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.741373 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.741433 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.741455 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.741618 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.742975 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj"] Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.745168 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.884222 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.884276 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw2br\" (UniqueName: \"kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.884359 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.884390 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.884436 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.985817 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: 
\"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.986486 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.986545 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw2br\" (UniqueName: \"kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.986629 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.986661 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.988703 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.989340 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.991926 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:21 crc kubenswrapper[4693]: I1122 09:30:21.992355 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:22 crc kubenswrapper[4693]: I1122 09:30:22.003504 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw2br\" (UniqueName: \"kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-gtdvj\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:22 crc kubenswrapper[4693]: I1122 09:30:22.055402 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" Nov 22 09:30:22 crc kubenswrapper[4693]: I1122 09:30:22.521348 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj"] Nov 22 09:30:22 crc kubenswrapper[4693]: I1122 09:30:22.646004 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" event={"ID":"e06049f5-cf35-4685-9cae-cf2c1cfa2dda","Type":"ContainerStarted","Data":"39154f4a6bb7ba425e7964720d34ddf928da8bc6bbfba9925364f990c1b6b252"} Nov 22 09:30:23 crc kubenswrapper[4693]: I1122 09:30:23.658586 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" event={"ID":"e06049f5-cf35-4685-9cae-cf2c1cfa2dda","Type":"ContainerStarted","Data":"ce8d498c9d4c9638630d39c261ca89041a873246bc4b3d16888ec153fbf31a0d"} Nov 22 09:30:23 crc kubenswrapper[4693]: I1122 09:30:23.676677 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" podStartSLOduration=2.144304696 podStartE2EDuration="2.67666002s" podCreationTimestamp="2025-11-22 09:30:21 +0000 UTC" firstStartedPulling="2025-11-22 09:30:22.529391963 +0000 UTC m=+1618.671894253" lastFinishedPulling="2025-11-22 09:30:23.061747286 +0000 UTC m=+1619.204249577" observedRunningTime="2025-11-22 09:30:23.671963339 +0000 UTC m=+1619.814465630" watchObservedRunningTime="2025-11-22 09:30:23.67666002 +0000 UTC m=+1619.819162311" Nov 22 09:30:25 crc kubenswrapper[4693]: I1122 09:30:25.146803 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:30:25 crc kubenswrapper[4693]: E1122 09:30:25.147632 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:30:28 crc kubenswrapper[4693]: I1122 09:30:28.030376 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgpc5"] Nov 22 09:30:28 crc kubenswrapper[4693]: I1122 09:30:28.035276 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgpc5"] Nov 22 09:30:28 crc kubenswrapper[4693]: I1122 09:30:28.154207 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="215c5e95-2af2-4e5f-8ff4-1587744ac34d" path="/var/lib/kubelet/pods/215c5e95-2af2-4e5f-8ff4-1587744ac34d/volumes" Nov 22 09:30:32 crc kubenswrapper[4693]: I1122 09:30:32.842758 4693 scope.go:117] "RemoveContainer" 
containerID="1b3f757c798e5c588e3a3b320fd7c11ee0a5bf14d282349c92ee5510aefe6b22" Nov 22 09:30:32 crc kubenswrapper[4693]: I1122 09:30:32.870913 4693 scope.go:117] "RemoveContainer" containerID="edc14c25cacc83c8e05fa203ad44366adb22cef814080ee11e22e32af667b51a" Nov 22 09:30:32 crc kubenswrapper[4693]: I1122 09:30:32.914980 4693 scope.go:117] "RemoveContainer" containerID="e2132bbd7701e65be6d6136f4683600c378e044e916742cd455bd97bef24f4d9" Nov 22 09:30:36 crc kubenswrapper[4693]: I1122 09:30:36.146728 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:30:36 crc kubenswrapper[4693]: E1122 09:30:36.147395 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:30:51 crc kubenswrapper[4693]: I1122 09:30:51.147066 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:30:51 crc kubenswrapper[4693]: E1122 09:30:51.147900 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:31:03 crc kubenswrapper[4693]: I1122 09:31:03.146788 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:31:03 crc kubenswrapper[4693]: E1122 09:31:03.147516 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:31:05 crc kubenswrapper[4693]: I1122 09:31:05.962646 4693 generic.go:334] "Generic (PLEG): container finished" podID="e06049f5-cf35-4685-9cae-cf2c1cfa2dda" containerID="ce8d498c9d4c9638630d39c261ca89041a873246bc4b3d16888ec153fbf31a0d" exitCode=0 Nov 22 09:31:05 crc kubenswrapper[4693]: I1122 09:31:05.962761 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" event={"ID":"e06049f5-cf35-4685-9cae-cf2c1cfa2dda","Type":"ContainerDied","Data":"ce8d498c9d4c9638630d39c261ca89041a873246bc4b3d16888ec153fbf31a0d"} Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.329924 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj"
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.428730 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0\") pod \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") "
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.428790 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw2br\" (UniqueName: \"kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br\") pod \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") "
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.428853 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle\") pod \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") "
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.428886 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key\") pod \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") "
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.434509 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "e06049f5-cf35-4685-9cae-cf2c1cfa2dda" (UID: "e06049f5-cf35-4685-9cae-cf2c1cfa2dda"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.435400 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br" (OuterVolumeSpecName: "kube-api-access-cw2br") pod "e06049f5-cf35-4685-9cae-cf2c1cfa2dda" (UID: "e06049f5-cf35-4685-9cae-cf2c1cfa2dda"). InnerVolumeSpecName "kube-api-access-cw2br". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.448627 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e06049f5-cf35-4685-9cae-cf2c1cfa2dda" (UID: "e06049f5-cf35-4685-9cae-cf2c1cfa2dda"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.448744 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "e06049f5-cf35-4685-9cae-cf2c1cfa2dda" (UID: "e06049f5-cf35-4685-9cae-cf2c1cfa2dda"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.530563 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory\") pod \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\" (UID: \"e06049f5-cf35-4685-9cae-cf2c1cfa2dda\") "
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.531519 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.531746 4693 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovncontroller-config-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.531758 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw2br\" (UniqueName: \"kubernetes.io/projected/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-kube-api-access-cw2br\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.531768 4693 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.556966 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory" (OuterVolumeSpecName: "inventory") pod "e06049f5-cf35-4685-9cae-cf2c1cfa2dda" (UID: "e06049f5-cf35-4685-9cae-cf2c1cfa2dda"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.634294 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e06049f5-cf35-4685-9cae-cf2c1cfa2dda-inventory\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.981753 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj" event={"ID":"e06049f5-cf35-4685-9cae-cf2c1cfa2dda","Type":"ContainerDied","Data":"39154f4a6bb7ba425e7964720d34ddf928da8bc6bbfba9925364f990c1b6b252"}
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.981814 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-gtdvj"
Nov 22 09:31:07 crc kubenswrapper[4693]: I1122 09:31:07.981866 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="39154f4a6bb7ba425e7964720d34ddf928da8bc6bbfba9925364f990c1b6b252"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.052136 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"]
Nov 22 09:31:08 crc kubenswrapper[4693]: E1122 09:31:08.052474 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e06049f5-cf35-4685-9cae-cf2c1cfa2dda" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.052494 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e06049f5-cf35-4685-9cae-cf2c1cfa2dda" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.052668 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e06049f5-cf35-4685-9cae-cf2c1cfa2dda" containerName="ovn-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.053236 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.055021 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.055205 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.055224 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.057285 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.057583 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.057763 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.067312 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"]
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.146298 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.146579 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.146915 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.147033 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbstz\" (UniqueName: \"kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.147142 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.147229 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.248885 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.248938 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.249013 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.249055 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.249121 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.249151 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbstz\" (UniqueName: \"kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.252718 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.253176 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.253177 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.253357 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.253653 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.264347 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbstz\" (UniqueName: \"kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.368946 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.804978 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"]
Nov 22 09:31:08 crc kubenswrapper[4693]: I1122 09:31:08.990944 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt" event={"ID":"ecdf1a42-8112-4bcc-b356-e364d56b93bb","Type":"ContainerStarted","Data":"6a74d701db9f1ead2979072725b0b905059682a4ed0275086c0b396033b1909e"}
Nov 22 09:31:10 crc kubenswrapper[4693]: I1122 09:31:10.000653 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt" event={"ID":"ecdf1a42-8112-4bcc-b356-e364d56b93bb","Type":"ContainerStarted","Data":"1eef5a28a390e303177d3116899ff46d2d7c025e621b2c7e7330d3affc192f05"}
Nov 22 09:31:10 crc kubenswrapper[4693]: I1122 09:31:10.020712 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt" podStartSLOduration=1.5390249219999999 podStartE2EDuration="2.020694601s" podCreationTimestamp="2025-11-22 09:31:08 +0000 UTC" firstStartedPulling="2025-11-22 09:31:08.808089214 +0000 UTC m=+1664.950591505" lastFinishedPulling="2025-11-22 09:31:09.289758893 +0000 UTC m=+1665.432261184" observedRunningTime="2025-11-22 09:31:10.016600031 +0000 UTC m=+1666.159102322" watchObservedRunningTime="2025-11-22 09:31:10.020694601 +0000 UTC m=+1666.163196892"
Nov 22 09:31:17 crc kubenswrapper[4693]: I1122 09:31:17.146973 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:31:17 crc kubenswrapper[4693]: E1122 09:31:17.147740 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:31:31 crc kubenswrapper[4693]: I1122 09:31:31.146349 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:31:31 crc kubenswrapper[4693]: E1122 09:31:31.147199 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:31:42 crc kubenswrapper[4693]: I1122 09:31:42.231771 4693 generic.go:334] "Generic (PLEG): container finished" podID="ecdf1a42-8112-4bcc-b356-e364d56b93bb" containerID="1eef5a28a390e303177d3116899ff46d2d7c025e621b2c7e7330d3affc192f05" exitCode=0
Nov 22 09:31:42 crc kubenswrapper[4693]: I1122 09:31:42.231836 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt" event={"ID":"ecdf1a42-8112-4bcc-b356-e364d56b93bb","Type":"ContainerDied","Data":"1eef5a28a390e303177d3116899ff46d2d7c025e621b2c7e7330d3affc192f05"}
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.587732 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.703982 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.704042 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.704087 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.704216 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.704663 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbstz\" (UniqueName: \"kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.704773 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key\") pod \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\" (UID: \"ecdf1a42-8112-4bcc-b356-e364d56b93bb\") "
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.710503 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.710537 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz" (OuterVolumeSpecName: "kube-api-access-wbstz") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "kube-api-access-wbstz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.727521 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.731139 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.735898 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.739113 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory" (OuterVolumeSpecName: "inventory") pod "ecdf1a42-8112-4bcc-b356-e364d56b93bb" (UID: "ecdf1a42-8112-4bcc-b356-e364d56b93bb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808626 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbstz\" (UniqueName: \"kubernetes.io/projected/ecdf1a42-8112-4bcc-b356-e364d56b93bb-kube-api-access-wbstz\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808658 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808671 4693 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808683 4693 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808695 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-inventory\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:43 crc kubenswrapper[4693]: I1122 09:31:43.808707 4693 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ecdf1a42-8112-4bcc-b356-e364d56b93bb-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.152037 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:31:44 crc kubenswrapper[4693]: E1122 09:31:44.152642 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.256189 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt" event={"ID":"ecdf1a42-8112-4bcc-b356-e364d56b93bb","Type":"ContainerDied","Data":"6a74d701db9f1ead2979072725b0b905059682a4ed0275086c0b396033b1909e"}
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.256304 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6a74d701db9f1ead2979072725b0b905059682a4ed0275086c0b396033b1909e"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.256311 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.319633 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"]
Nov 22 09:31:44 crc kubenswrapper[4693]: E1122 09:31:44.320018 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ecdf1a42-8112-4bcc-b356-e364d56b93bb" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.320035 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ecdf1a42-8112-4bcc-b356-e364d56b93bb" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.320195 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ecdf1a42-8112-4bcc-b356-e364d56b93bb" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.320801 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.328217 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.328302 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.328364 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.328392 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.334900 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"]
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.336079 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.418171 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.418304 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.418413 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.418524 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.418657 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bjpsk\" (UniqueName: \"kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.520271 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bjpsk\" (UniqueName: \"kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.520464 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.520527 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.520613 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.520742 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.526760 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.526784 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.530269 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.534190 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.538348 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bjpsk\" (UniqueName: \"kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:44 crc kubenswrapper[4693]: I1122 09:31:44.634002 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:31:45 crc kubenswrapper[4693]: I1122 09:31:45.115723 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"]
Nov 22 09:31:45 crc kubenswrapper[4693]: I1122 09:31:45.266160 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m" event={"ID":"537a26f3-39a2-48e4-af18-39d0e944c4b0","Type":"ContainerStarted","Data":"a13d1341c294522f3f2af1a56ddd3b4ead4aa7d0fcb90b598d0215b892e31040"}
Nov 22 09:31:46 crc kubenswrapper[4693]: I1122 09:31:46.314204 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m" event={"ID":"537a26f3-39a2-48e4-af18-39d0e944c4b0","Type":"ContainerStarted","Data":"6d508db8fd8a85ac6e58e2cf9e2da0de0450ec00f67700dc1e34966f16aba311"}
Nov 22 09:31:46 crc kubenswrapper[4693]: I1122 09:31:46.326971 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m" podStartSLOduration=1.781227421 podStartE2EDuration="2.326954942s" podCreationTimestamp="2025-11-22 09:31:44 +0000 UTC" firstStartedPulling="2025-11-22 09:31:45.120012413 +0000 UTC m=+1701.262514704" lastFinishedPulling="2025-11-22 09:31:45.665739934 +0000 UTC m=+1701.808242225" observedRunningTime="2025-11-22 09:31:46.326606727 +0000 UTC m=+1702.469109028" watchObservedRunningTime="2025-11-22 09:31:46.326954942 +0000 UTC m=+1702.469457233"
Nov 22 09:31:57 crc kubenswrapper[4693]: I1122 09:31:57.147282 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:31:57 crc kubenswrapper[4693]: E1122 09:31:57.147943 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:32:12 crc kubenswrapper[4693]: I1122 09:32:12.146997 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:32:12 crc kubenswrapper[4693]: E1122 09:32:12.147653 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:32:25 crc kubenswrapper[4693]: I1122 09:32:25.147316 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:32:25 crc kubenswrapper[4693]: E1122 09:32:25.148110 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:32:36 crc kubenswrapper[4693]: I1122 09:32:36.147454 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:32:36 crc kubenswrapper[4693]: E1122 09:32:36.148264 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:32:51 crc kubenswrapper[4693]: I1122 09:32:51.146988 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:32:51 crc kubenswrapper[4693]: E1122 09:32:51.147593 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:33:05 crc kubenswrapper[4693]: I1122 09:33:05.147332 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2"
Nov 22 09:33:05 crc kubenswrapper[4693]: I1122 09:33:05.892389 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171"}
Nov 22 09:34:31 crc kubenswrapper[4693]: I1122 09:34:31.491660 4693 generic.go:334] "Generic (PLEG): container finished" podID="537a26f3-39a2-48e4-af18-39d0e944c4b0" containerID="6d508db8fd8a85ac6e58e2cf9e2da0de0450ec00f67700dc1e34966f16aba311" exitCode=0
Nov 22 09:34:31 crc kubenswrapper[4693]: I1122 09:34:31.491744 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m" event={"ID":"537a26f3-39a2-48e4-af18-39d0e944c4b0","Type":"ContainerDied","Data":"6d508db8fd8a85ac6e58e2cf9e2da0de0450ec00f67700dc1e34966f16aba311"}
Nov 22 09:34:32 crc kubenswrapper[4693]: I1122 09:34:32.825227 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.016016 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bjpsk\" (UniqueName: \"kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk\") pod \"537a26f3-39a2-48e4-af18-39d0e944c4b0\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") "
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.016171 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory\") pod \"537a26f3-39a2-48e4-af18-39d0e944c4b0\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") "
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.016319 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key\") pod \"537a26f3-39a2-48e4-af18-39d0e944c4b0\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") "
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.016342 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0\") pod \"537a26f3-39a2-48e4-af18-39d0e944c4b0\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") "
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.016385 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle\") pod \"537a26f3-39a2-48e4-af18-39d0e944c4b0\" (UID: \"537a26f3-39a2-48e4-af18-39d0e944c4b0\") "
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.022714 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "537a26f3-39a2-48e4-af18-39d0e944c4b0" (UID: "537a26f3-39a2-48e4-af18-39d0e944c4b0"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.023250 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk" (OuterVolumeSpecName: "kube-api-access-bjpsk") pod "537a26f3-39a2-48e4-af18-39d0e944c4b0" (UID: "537a26f3-39a2-48e4-af18-39d0e944c4b0"). InnerVolumeSpecName "kube-api-access-bjpsk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.040251 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "537a26f3-39a2-48e4-af18-39d0e944c4b0" (UID: "537a26f3-39a2-48e4-af18-39d0e944c4b0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.041490 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory" (OuterVolumeSpecName: "inventory") pod "537a26f3-39a2-48e4-af18-39d0e944c4b0" (UID: "537a26f3-39a2-48e4-af18-39d0e944c4b0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.043257 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "537a26f3-39a2-48e4-af18-39d0e944c4b0" (UID: "537a26f3-39a2-48e4-af18-39d0e944c4b0"). InnerVolumeSpecName "libvirt-secret-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.118502 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bjpsk\" (UniqueName: \"kubernetes.io/projected/537a26f3-39a2-48e4-af18-39d0e944c4b0-kube-api-access-bjpsk\") on node \"crc\" DevicePath \"\""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.118807 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-inventory\") on node \"crc\" DevicePath \"\""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.118819 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-ssh-key\") on node \"crc\" DevicePath \"\""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.118829 4693 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-secret-0\") on node \"crc\" DevicePath \"\""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.118839 4693 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/537a26f3-39a2-48e4-af18-39d0e944c4b0-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.508196 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m" event={"ID":"537a26f3-39a2-48e4-af18-39d0e944c4b0","Type":"ContainerDied","Data":"a13d1341c294522f3f2af1a56ddd3b4ead4aa7d0fcb90b598d0215b892e31040"}
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.508241 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a13d1341c294522f3f2af1a56ddd3b4ead4aa7d0fcb90b598d0215b892e31040"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.508273 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.581648 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"]
Nov 22 09:34:33 crc kubenswrapper[4693]: E1122 09:34:33.582040 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="537a26f3-39a2-48e4-af18-39d0e944c4b0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.582062 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="537a26f3-39a2-48e4-af18-39d0e944c4b0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.582236 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="537a26f3-39a2-48e4-af18-39d0e944c4b0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.584350 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.585837 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.586432 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.586659 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.587598 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.587821 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.587987 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.591449 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.592899 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"]
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627602 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627722 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627792 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627839 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627885 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftzr9\" (UniqueName: \"kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627911 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.627947 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.628020 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.628064 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729130 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729192 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729231 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729287 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729309 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729334 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729439 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftzr9\" (UniqueName: \"kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729543 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.729815 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.730542 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.732490 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.732630 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.732835 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.735070 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.735298 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.736662 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.738712 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.746160 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftzr9\" (UniqueName: \"kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9\") pod \"nova-edpm-deployment-openstack-edpm-ipam-vt57h\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:33 crc kubenswrapper[4693]: I1122 09:34:33.900329 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:34:34 crc kubenswrapper[4693]: I1122 09:34:34.336432 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"]
Nov 22 09:34:34 crc kubenswrapper[4693]: I1122 09:34:34.340119 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Nov 22 09:34:34 crc kubenswrapper[4693]: I1122 09:34:34.515782 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" event={"ID":"75221b44-0170-4231-b768-ad88de26addb","Type":"ContainerStarted","Data":"768def58fc6d74d83a2241360d658897c4f733d19dec9246ae7fe81f83633085"}
Nov 22 09:34:35 crc kubenswrapper[4693]: I1122 09:34:35.523046 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" event={"ID":"75221b44-0170-4231-b768-ad88de26addb","Type":"ContainerStarted","Data":"864ee37dad4e6f0da0c30852be551a724effd7040322b3eb5711c54c444c2e07"}
Nov 22 09:34:35 crc kubenswrapper[4693]: I1122 09:34:35.541009 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" podStartSLOduration=2.017901664 podStartE2EDuration="2.540994932s" podCreationTimestamp="2025-11-22 09:34:33 +0000 UTC" firstStartedPulling="2025-11-22 09:34:34.339828114 +0000 UTC m=+1870.482330405" lastFinishedPulling="2025-11-22 09:34:34.862921382 +0000 UTC m=+1871.005423673" observedRunningTime="2025-11-22 09:34:35.5347136 +0000 UTC m=+1871.677215891" watchObservedRunningTime="2025-11-22 09:34:35.540994932 +0000 UTC m=+1871.683497213"
Nov 22 09:35:30 crc kubenswrapper[4693]: I1122 09:35:30.246046 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:35:30 crc kubenswrapper[4693]: I1122 09:35:30.246569 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:36:00 crc kubenswrapper[4693]: I1122 09:36:00.246942 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:36:00 crc kubenswrapper[4693]: I1122 09:36:00.247544 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:36:23 crc kubenswrapper[4693]: I1122 09:36:23.380986 4693 generic.go:334] "Generic (PLEG): container finished" podID="75221b44-0170-4231-b768-ad88de26addb" containerID="864ee37dad4e6f0da0c30852be551a724effd7040322b3eb5711c54c444c2e07" exitCode=0
Nov 22 09:36:23 crc kubenswrapper[4693]: I1122 09:36:23.381066 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" event={"ID":"75221b44-0170-4231-b768-ad88de26addb","Type":"ContainerDied","Data":"864ee37dad4e6f0da0c30852be551a724effd7040322b3eb5711c54c444c2e07"}
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.691839 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h"
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.699822 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.699913 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700005 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700087 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700118 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700205 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700443 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") "
Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700547 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume
\"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.700572 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftzr9\" (UniqueName: \"kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9\") pod \"75221b44-0170-4231-b768-ad88de26addb\" (UID: \"75221b44-0170-4231-b768-ad88de26addb\") " Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.708809 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.708977 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9" (OuterVolumeSpecName: "kube-api-access-ftzr9") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "kube-api-access-ftzr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.725717 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory" (OuterVolumeSpecName: "inventory") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.728960 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.733043 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.735544 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-cell1-compute-config-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.737276 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.741836 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.744417 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "75221b44-0170-4231-b768-ad88de26addb" (UID: "75221b44-0170-4231-b768-ad88de26addb"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.801991 4693 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802016 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftzr9\" (UniqueName: \"kubernetes.io/projected/75221b44-0170-4231-b768-ad88de26addb-kube-api-access-ftzr9\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802026 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802037 4693 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/75221b44-0170-4231-b768-ad88de26addb-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802046 4693 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802054 4693 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802063 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802071 4693 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:24 crc kubenswrapper[4693]: I1122 09:36:24.802079 4693 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/75221b44-0170-4231-b768-ad88de26addb-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.398854 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" event={"ID":"75221b44-0170-4231-b768-ad88de26addb","Type":"ContainerDied","Data":"768def58fc6d74d83a2241360d658897c4f733d19dec9246ae7fe81f83633085"} Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.398906 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="768def58fc6d74d83a2241360d658897c4f733d19dec9246ae7fe81f83633085" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.398970 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-vt57h" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.550704 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz"] Nov 22 09:36:25 crc kubenswrapper[4693]: E1122 09:36:25.552097 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="75221b44-0170-4231-b768-ad88de26addb" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.552114 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="75221b44-0170-4231-b768-ad88de26addb" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.552546 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="75221b44-0170-4231-b768-ad88de26addb" containerName="nova-edpm-deployment-openstack-edpm-ipam" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.553520 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.555893 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.556295 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.556566 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-compute-config-data" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.557242 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-8frgq" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.559605 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.580892 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz"] Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619090 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619353 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619463 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619538 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619631 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fp7rt\" (UniqueName: \"kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 
09:36:25.619768 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.619884 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.721320 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722154 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722273 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722316 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722340 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722416 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fp7rt\" (UniqueName: \"kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " 
pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.722990 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.727462 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.728097 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.728103 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.729032 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.729292 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.729572 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.736270 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fp7rt\" (UniqueName: \"kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt\") pod \"telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz\" (UID: 
\"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:25 crc kubenswrapper[4693]: I1122 09:36:25.874586 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:36:26 crc kubenswrapper[4693]: I1122 09:36:26.333172 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz"] Nov 22 09:36:26 crc kubenswrapper[4693]: I1122 09:36:26.407123 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" event={"ID":"021fbba7-764a-4284-a4f1-1b8db668d9fd","Type":"ContainerStarted","Data":"aca89ff758ba14b57d9160c597415dee56affd05c41bb6ddb4865a5693027222"} Nov 22 09:36:27 crc kubenswrapper[4693]: I1122 09:36:27.415671 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" event={"ID":"021fbba7-764a-4284-a4f1-1b8db668d9fd","Type":"ContainerStarted","Data":"a6d081f5704eb8bf86e2e742aed09ebbb01f4eb69d37e885464761524f6d78f1"} Nov 22 09:36:27 crc kubenswrapper[4693]: I1122 09:36:27.434293 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" podStartSLOduration=1.939376267 podStartE2EDuration="2.434268729s" podCreationTimestamp="2025-11-22 09:36:25 +0000 UTC" firstStartedPulling="2025-11-22 09:36:26.341496898 +0000 UTC m=+1982.483999189" lastFinishedPulling="2025-11-22 09:36:26.83638936 +0000 UTC m=+1982.978891651" observedRunningTime="2025-11-22 09:36:27.430514641 +0000 UTC m=+1983.573016932" watchObservedRunningTime="2025-11-22 09:36:27.434268729 +0000 UTC m=+1983.576771020" Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.246262 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.246576 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.246620 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.247064 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.247135 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" 
containerID="cri-o://87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171" gracePeriod=600 Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.444323 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171" exitCode=0 Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.444508 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171"} Nov 22 09:36:30 crc kubenswrapper[4693]: I1122 09:36:30.444598 4693 scope.go:117] "RemoveContainer" containerID="066b670526a12ee6608c86010ab1a6a16a6e22409aa609b4e3d8c2b3863f60c2" Nov 22 09:36:31 crc kubenswrapper[4693]: I1122 09:36:31.455909 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25"} Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.270128 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.272531 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.292460 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.331273 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8fv2\" (UniqueName: \"kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.331522 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.331617 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.433567 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.433614 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.433736 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8fv2\" (UniqueName: \"kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.434203 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.434260 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.451522 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8fv2\" (UniqueName: \"kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2\") pod \"community-operators-b2skp\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:09 crc kubenswrapper[4693]: I1122 09:37:09.590542 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:10 crc kubenswrapper[4693]: I1122 09:37:10.033999 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:10 crc kubenswrapper[4693]: I1122 09:37:10.765977 4693 generic.go:334] "Generic (PLEG): container finished" podID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerID="7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13" exitCode=0 Nov 22 09:37:10 crc kubenswrapper[4693]: I1122 09:37:10.766088 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerDied","Data":"7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13"} Nov 22 09:37:10 crc kubenswrapper[4693]: I1122 09:37:10.766283 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerStarted","Data":"d88cf7c467f398b302c5f294db0f4d4d5333c4c112dc46dd6d4a4c09765c6ec4"} Nov 22 09:37:11 crc kubenswrapper[4693]: I1122 09:37:11.775076 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerStarted","Data":"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481"} Nov 22 09:37:12 crc kubenswrapper[4693]: I1122 09:37:12.787051 4693 generic.go:334] "Generic (PLEG): container finished" podID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerID="78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481" exitCode=0 Nov 22 09:37:12 crc kubenswrapper[4693]: I1122 09:37:12.787132 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerDied","Data":"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481"} Nov 22 09:37:13 crc kubenswrapper[4693]: I1122 09:37:13.798662 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerStarted","Data":"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0"} Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.842392 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b2skp" podStartSLOduration=4.285627115 podStartE2EDuration="6.842362245s" podCreationTimestamp="2025-11-22 09:37:09 +0000 UTC" firstStartedPulling="2025-11-22 09:37:10.768838732 +0000 UTC m=+2026.911341024" lastFinishedPulling="2025-11-22 09:37:13.325573862 +0000 UTC m=+2029.468076154" observedRunningTime="2025-11-22 09:37:13.819010948 +0000 UTC m=+2029.961513240" watchObservedRunningTime="2025-11-22 09:37:15.842362245 +0000 UTC m=+2031.984864536" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.846477 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.848188 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.860083 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.865364 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zvwww\" (UniqueName: \"kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.865493 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.865538 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.968063 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zvwww\" (UniqueName: \"kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.968184 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.968226 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.968697 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.968721 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:15 crc kubenswrapper[4693]: I1122 09:37:15.988715 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zvwww\" (UniqueName: \"kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww\") pod \"certified-operators-s2hr8\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:16 crc kubenswrapper[4693]: I1122 09:37:16.162157 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:16 crc kubenswrapper[4693]: I1122 09:37:16.619270 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:16 crc kubenswrapper[4693]: W1122 09:37:16.621293 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podde2a9705_f52f_4fa7_9cea_c056c66954ce.slice/crio-6be3d2d09ffe3bc6588cb93fb4df9fee8e5d58243e20e0450ac543d65e463309 WatchSource:0}: Error finding container 6be3d2d09ffe3bc6588cb93fb4df9fee8e5d58243e20e0450ac543d65e463309: Status 404 returned error can't find the container with id 6be3d2d09ffe3bc6588cb93fb4df9fee8e5d58243e20e0450ac543d65e463309 Nov 22 09:37:16 crc kubenswrapper[4693]: I1122 09:37:16.855896 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerStarted","Data":"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c"} Nov 22 09:37:16 crc kubenswrapper[4693]: I1122 09:37:16.856779 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerStarted","Data":"6be3d2d09ffe3bc6588cb93fb4df9fee8e5d58243e20e0450ac543d65e463309"} Nov 22 09:37:17 crc kubenswrapper[4693]: I1122 09:37:17.897193 4693 generic.go:334] "Generic (PLEG): container finished" podID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerID="5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c" exitCode=0 Nov 22 09:37:17 crc kubenswrapper[4693]: I1122 09:37:17.897401 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerDied","Data":"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c"} Nov 22 09:37:18 crc kubenswrapper[4693]: I1122 09:37:18.912781 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerStarted","Data":"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e"} Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.590644 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.590706 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.627871 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.922876 4693 generic.go:334] "Generic (PLEG): container finished" podID="de2a9705-f52f-4fa7-9cea-c056c66954ce" 
containerID="da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e" exitCode=0 Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.922940 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerDied","Data":"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e"} Nov 22 09:37:19 crc kubenswrapper[4693]: I1122 09:37:19.960018 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:20 crc kubenswrapper[4693]: I1122 09:37:20.932881 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerStarted","Data":"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f"} Nov 22 09:37:20 crc kubenswrapper[4693]: I1122 09:37:20.955662 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-s2hr8" podStartSLOduration=3.451758119 podStartE2EDuration="5.95563274s" podCreationTimestamp="2025-11-22 09:37:15 +0000 UTC" firstStartedPulling="2025-11-22 09:37:17.905552787 +0000 UTC m=+2034.048055079" lastFinishedPulling="2025-11-22 09:37:20.40942741 +0000 UTC m=+2036.551929700" observedRunningTime="2025-11-22 09:37:20.951027301 +0000 UTC m=+2037.093529593" watchObservedRunningTime="2025-11-22 09:37:20.95563274 +0000 UTC m=+2037.098135031" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.047610 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.048464 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b2skp" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="registry-server" containerID="cri-o://ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0" gracePeriod=2 Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.441154 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.612326 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities\") pod \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.612870 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content\") pod \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.612910 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8fv2\" (UniqueName: \"kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2\") pod \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\" (UID: \"9c5568d8-7e40-49a1-95a4-6e60663de0d8\") " Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.613310 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities" (OuterVolumeSpecName: "utilities") pod "9c5568d8-7e40-49a1-95a4-6e60663de0d8" (UID: "9c5568d8-7e40-49a1-95a4-6e60663de0d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.613738 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.619723 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2" (OuterVolumeSpecName: "kube-api-access-h8fv2") pod "9c5568d8-7e40-49a1-95a4-6e60663de0d8" (UID: "9c5568d8-7e40-49a1-95a4-6e60663de0d8"). InnerVolumeSpecName "kube-api-access-h8fv2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.648977 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9c5568d8-7e40-49a1-95a4-6e60663de0d8" (UID: "9c5568d8-7e40-49a1-95a4-6e60663de0d8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.715391 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9c5568d8-7e40-49a1-95a4-6e60663de0d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.715439 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8fv2\" (UniqueName: \"kubernetes.io/projected/9c5568d8-7e40-49a1-95a4-6e60663de0d8-kube-api-access-h8fv2\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.952290 4693 generic.go:334] "Generic (PLEG): container finished" podID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerID="ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0" exitCode=0 Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.952336 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerDied","Data":"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0"} Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.952365 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b2skp" event={"ID":"9c5568d8-7e40-49a1-95a4-6e60663de0d8","Type":"ContainerDied","Data":"d88cf7c467f398b302c5f294db0f4d4d5333c4c112dc46dd6d4a4c09765c6ec4"} Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.952365 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b2skp" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.952393 4693 scope.go:117] "RemoveContainer" containerID="ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.977353 4693 scope.go:117] "RemoveContainer" containerID="78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481" Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.978147 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:22 crc kubenswrapper[4693]: I1122 09:37:22.989401 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b2skp"] Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.019882 4693 scope.go:117] "RemoveContainer" containerID="7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.040996 4693 scope.go:117] "RemoveContainer" containerID="ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0" Nov 22 09:37:23 crc kubenswrapper[4693]: E1122 09:37:23.041431 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0\": container with ID starting with ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0 not found: ID does not exist" containerID="ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.041473 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0"} err="failed to get container status 
\"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0\": rpc error: code = NotFound desc = could not find container \"ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0\": container with ID starting with ab2f6c6138dcac1b04641fc6ee7c79ace4f1a91ca685ee06b388ec0d66d327d0 not found: ID does not exist" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.041499 4693 scope.go:117] "RemoveContainer" containerID="78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481" Nov 22 09:37:23 crc kubenswrapper[4693]: E1122 09:37:23.041832 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481\": container with ID starting with 78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481 not found: ID does not exist" containerID="78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.041883 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481"} err="failed to get container status \"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481\": rpc error: code = NotFound desc = could not find container \"78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481\": container with ID starting with 78b770ef3a2c78537049f694d776dc502d4f01fca8aab28c836618931742b481 not found: ID does not exist" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.041904 4693 scope.go:117] "RemoveContainer" containerID="7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13" Nov 22 09:37:23 crc kubenswrapper[4693]: E1122 09:37:23.042607 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13\": container with ID starting with 7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13 not found: ID does not exist" containerID="7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13" Nov 22 09:37:23 crc kubenswrapper[4693]: I1122 09:37:23.042632 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13"} err="failed to get container status \"7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13\": rpc error: code = NotFound desc = could not find container \"7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13\": container with ID starting with 7a8fdc7e6fdf854380dc8806c1fa477b3606cc0b7871116da44304d275d62a13 not found: ID does not exist" Nov 22 09:37:24 crc kubenswrapper[4693]: I1122 09:37:24.158901 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" path="/var/lib/kubelet/pods/9c5568d8-7e40-49a1-95a4-6e60663de0d8/volumes" Nov 22 09:37:26 crc kubenswrapper[4693]: I1122 09:37:26.163024 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:26 crc kubenswrapper[4693]: I1122 09:37:26.163388 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:26 crc kubenswrapper[4693]: I1122 09:37:26.205195 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="started" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:27 crc kubenswrapper[4693]: I1122 09:37:27.022078 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:27 crc kubenswrapper[4693]: I1122 09:37:27.067217 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.005622 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-s2hr8" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="registry-server" containerID="cri-o://19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f" gracePeriod=2 Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.407087 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.551913 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content\") pod \"de2a9705-f52f-4fa7-9cea-c056c66954ce\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.552360 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zvwww\" (UniqueName: \"kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww\") pod \"de2a9705-f52f-4fa7-9cea-c056c66954ce\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.552431 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities\") pod \"de2a9705-f52f-4fa7-9cea-c056c66954ce\" (UID: \"de2a9705-f52f-4fa7-9cea-c056c66954ce\") " Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.553042 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities" (OuterVolumeSpecName: "utilities") pod "de2a9705-f52f-4fa7-9cea-c056c66954ce" (UID: "de2a9705-f52f-4fa7-9cea-c056c66954ce"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.558109 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww" (OuterVolumeSpecName: "kube-api-access-zvwww") pod "de2a9705-f52f-4fa7-9cea-c056c66954ce" (UID: "de2a9705-f52f-4fa7-9cea-c056c66954ce"). InnerVolumeSpecName "kube-api-access-zvwww". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.593925 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "de2a9705-f52f-4fa7-9cea-c056c66954ce" (UID: "de2a9705-f52f-4fa7-9cea-c056c66954ce"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.655095 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zvwww\" (UniqueName: \"kubernetes.io/projected/de2a9705-f52f-4fa7-9cea-c056c66954ce-kube-api-access-zvwww\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.655131 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:29 crc kubenswrapper[4693]: I1122 09:37:29.655142 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/de2a9705-f52f-4fa7-9cea-c056c66954ce-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.013798 4693 generic.go:334] "Generic (PLEG): container finished" podID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerID="19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f" exitCode=0 Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.013910 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerDied","Data":"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f"} Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.014193 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-s2hr8" event={"ID":"de2a9705-f52f-4fa7-9cea-c056c66954ce","Type":"ContainerDied","Data":"6be3d2d09ffe3bc6588cb93fb4df9fee8e5d58243e20e0450ac543d65e463309"} Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.014221 4693 scope.go:117] "RemoveContainer" containerID="19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.013957 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-s2hr8" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.039698 4693 scope.go:117] "RemoveContainer" containerID="da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.049458 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.060138 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-s2hr8"] Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.070790 4693 scope.go:117] "RemoveContainer" containerID="5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.101247 4693 scope.go:117] "RemoveContainer" containerID="19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f" Nov 22 09:37:30 crc kubenswrapper[4693]: E1122 09:37:30.101545 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f\": container with ID starting with 19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f not found: ID does not exist" containerID="19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.101576 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f"} err="failed to get container status \"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f\": rpc error: code = NotFound desc = could not find container \"19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f\": container with ID starting with 19c19a3dfe54661dae6b8286233242392b10dab121fcd324ff935f75a1682c8f not found: ID does not exist" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.101597 4693 scope.go:117] "RemoveContainer" containerID="da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e" Nov 22 09:37:30 crc kubenswrapper[4693]: E1122 09:37:30.101966 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e\": container with ID starting with da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e not found: ID does not exist" containerID="da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.101988 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e"} err="failed to get container status \"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e\": rpc error: code = NotFound desc = could not find container \"da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e\": container with ID starting with da6a3a557518f3a915e5e39e51391aa3b3f77a85ffc1729fb56b3b48d79b8f9e not found: ID does not exist" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.102000 4693 scope.go:117] "RemoveContainer" containerID="5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c" Nov 22 09:37:30 crc kubenswrapper[4693]: E1122 09:37:30.102270 4693 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c\": container with ID starting with 5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c not found: ID does not exist" containerID="5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.102294 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c"} err="failed to get container status \"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c\": rpc error: code = NotFound desc = could not find container \"5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c\": container with ID starting with 5b55319d990aaaf3344ddaa7a02729710f2646a1cc7981aba150cc69c959698c not found: ID does not exist" Nov 22 09:37:30 crc kubenswrapper[4693]: I1122 09:37:30.157469 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" path="/var/lib/kubelet/pods/de2a9705-f52f-4fa7-9cea-c056c66954ce/volumes" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.056884 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057558 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057569 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057582 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="extract-utilities" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057587 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="extract-utilities" Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057598 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="extract-content" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057604 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="extract-content" Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057621 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="extract-content" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057626 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="extract-content" Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057648 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="extract-utilities" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057661 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="extract-utilities" Nov 22 09:37:46 crc kubenswrapper[4693]: E1122 09:37:46.057678 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" 
containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057683 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057828 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c5568d8-7e40-49a1-95a4-6e60663de0d8" containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.057868 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="de2a9705-f52f-4fa7-9cea-c056c66954ce" containerName="registry-server" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.058994 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.072711 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.185766 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8m72s\" (UniqueName: \"kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.185866 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.185914 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.289677 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8m72s\" (UniqueName: \"kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.289959 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.290177 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.290688 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.290778 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.310516 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8m72s\" (UniqueName: \"kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s\") pod \"redhat-marketplace-qq292\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.374014 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:46 crc kubenswrapper[4693]: I1122 09:37:46.817409 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:37:47 crc kubenswrapper[4693]: I1122 09:37:47.153682 4693 generic.go:334] "Generic (PLEG): container finished" podID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerID="6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e" exitCode=0 Nov 22 09:37:47 crc kubenswrapper[4693]: I1122 09:37:47.153735 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerDied","Data":"6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e"} Nov 22 09:37:47 crc kubenswrapper[4693]: I1122 09:37:47.153768 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerStarted","Data":"49f4fb4a34eb83347574c4c5c4f70f5f1979236b081c9277fe69535a13bc265f"} Nov 22 09:37:48 crc kubenswrapper[4693]: I1122 09:37:48.178595 4693 generic.go:334] "Generic (PLEG): container finished" podID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerID="8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b" exitCode=0 Nov 22 09:37:48 crc kubenswrapper[4693]: I1122 09:37:48.178699 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerDied","Data":"8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b"} Nov 22 09:37:49 crc kubenswrapper[4693]: I1122 09:37:49.191364 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerStarted","Data":"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20"} Nov 22 09:37:49 crc kubenswrapper[4693]: I1122 09:37:49.208882 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-qq292" podStartSLOduration=1.681875315 podStartE2EDuration="3.208859366s" podCreationTimestamp="2025-11-22 09:37:46 +0000 UTC" firstStartedPulling="2025-11-22 09:37:47.15693215 
+0000 UTC m=+2063.299434441" lastFinishedPulling="2025-11-22 09:37:48.683916202 +0000 UTC m=+2064.826418492" observedRunningTime="2025-11-22 09:37:49.205942261 +0000 UTC m=+2065.348444552" watchObservedRunningTime="2025-11-22 09:37:49.208859366 +0000 UTC m=+2065.351361656" Nov 22 09:37:56 crc kubenswrapper[4693]: I1122 09:37:56.374795 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:56 crc kubenswrapper[4693]: I1122 09:37:56.375582 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:56 crc kubenswrapper[4693]: I1122 09:37:56.410106 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:57 crc kubenswrapper[4693]: I1122 09:37:57.277940 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:57 crc kubenswrapper[4693]: I1122 09:37:57.320630 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.262269 4693 generic.go:334] "Generic (PLEG): container finished" podID="021fbba7-764a-4284-a4f1-1b8db668d9fd" containerID="a6d081f5704eb8bf86e2e742aed09ebbb01f4eb69d37e885464761524f6d78f1" exitCode=0 Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.262344 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" event={"ID":"021fbba7-764a-4284-a4f1-1b8db668d9fd","Type":"ContainerDied","Data":"a6d081f5704eb8bf86e2e742aed09ebbb01f4eb69d37e885464761524f6d78f1"} Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.262708 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-qq292" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="registry-server" containerID="cri-o://7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20" gracePeriod=2 Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.643699 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.754578 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content\") pod \"342039e5-0014-4b61-8dfc-c2e94edf8195\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.754749 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities\") pod \"342039e5-0014-4b61-8dfc-c2e94edf8195\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.755192 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8m72s\" (UniqueName: \"kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s\") pod \"342039e5-0014-4b61-8dfc-c2e94edf8195\" (UID: \"342039e5-0014-4b61-8dfc-c2e94edf8195\") " Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.755903 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities" (OuterVolumeSpecName: "utilities") pod "342039e5-0014-4b61-8dfc-c2e94edf8195" (UID: "342039e5-0014-4b61-8dfc-c2e94edf8195"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.756817 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.761107 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s" (OuterVolumeSpecName: "kube-api-access-8m72s") pod "342039e5-0014-4b61-8dfc-c2e94edf8195" (UID: "342039e5-0014-4b61-8dfc-c2e94edf8195"). InnerVolumeSpecName "kube-api-access-8m72s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.769335 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "342039e5-0014-4b61-8dfc-c2e94edf8195" (UID: "342039e5-0014-4b61-8dfc-c2e94edf8195"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.858193 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/342039e5-0014-4b61-8dfc-c2e94edf8195-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:37:59 crc kubenswrapper[4693]: I1122 09:37:59.858445 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8m72s\" (UniqueName: \"kubernetes.io/projected/342039e5-0014-4b61-8dfc-c2e94edf8195-kube-api-access-8m72s\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.272319 4693 generic.go:334] "Generic (PLEG): container finished" podID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerID="7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20" exitCode=0 Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.272376 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-qq292" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.272426 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerDied","Data":"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20"} Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.272477 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-qq292" event={"ID":"342039e5-0014-4b61-8dfc-c2e94edf8195","Type":"ContainerDied","Data":"49f4fb4a34eb83347574c4c5c4f70f5f1979236b081c9277fe69535a13bc265f"} Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.272502 4693 scope.go:117] "RemoveContainer" containerID="7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.294086 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.298509 4693 scope.go:117] "RemoveContainer" containerID="8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.300345 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-qq292"] Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.327037 4693 scope.go:117] "RemoveContainer" containerID="6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.359431 4693 scope.go:117] "RemoveContainer" containerID="7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20" Nov 22 09:38:00 crc kubenswrapper[4693]: E1122 09:38:00.359761 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20\": container with ID starting with 7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20 not found: ID does not exist" containerID="7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.359787 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20"} err="failed to get container status 
\"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20\": rpc error: code = NotFound desc = could not find container \"7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20\": container with ID starting with 7568ef4f5038a1a1012f54758f75cd9fdf3e5c8a445abbbaeede2c7066c4df20 not found: ID does not exist" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.359810 4693 scope.go:117] "RemoveContainer" containerID="8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b" Nov 22 09:38:00 crc kubenswrapper[4693]: E1122 09:38:00.360175 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b\": container with ID starting with 8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b not found: ID does not exist" containerID="8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.360192 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b"} err="failed to get container status \"8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b\": rpc error: code = NotFound desc = could not find container \"8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b\": container with ID starting with 8aa2c96c5eb9f742b8067246b448b069b33537ee05fbf6c038c680f55ba0254b not found: ID does not exist" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.360204 4693 scope.go:117] "RemoveContainer" containerID="6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e" Nov 22 09:38:00 crc kubenswrapper[4693]: E1122 09:38:00.360558 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e\": container with ID starting with 6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e not found: ID does not exist" containerID="6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.360596 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e"} err="failed to get container status \"6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e\": rpc error: code = NotFound desc = could not find container \"6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e\": container with ID starting with 6b7e0e27a1db717f75dcd27b2e3e96d037ab7cd765597ab73bd640907e08d76e not found: ID does not exist" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.622506 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.700708 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.700825 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.700894 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.701002 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.701066 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.701455 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.701534 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fp7rt\" (UniqueName: \"kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt\") pod \"021fbba7-764a-4284-a4f1-1b8db668d9fd\" (UID: \"021fbba7-764a-4284-a4f1-1b8db668d9fd\") " Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.706963 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt" (OuterVolumeSpecName: "kube-api-access-fp7rt") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "kube-api-access-fp7rt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.707452 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). 
InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.726895 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0" (OuterVolumeSpecName: "ceilometer-compute-config-data-0") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "ceilometer-compute-config-data-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.727370 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1" (OuterVolumeSpecName: "ceilometer-compute-config-data-1") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "ceilometer-compute-config-data-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.727454 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.727650 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2" (OuterVolumeSpecName: "ceilometer-compute-config-data-2") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "ceilometer-compute-config-data-2". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.728950 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory" (OuterVolumeSpecName: "inventory") pod "021fbba7-764a-4284-a4f1-1b8db668d9fd" (UID: "021fbba7-764a-4284-a4f1-1b8db668d9fd"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804062 4693 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-1\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-1\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804093 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804104 4693 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-inventory\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804116 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fp7rt\" (UniqueName: \"kubernetes.io/projected/021fbba7-764a-4284-a4f1-1b8db668d9fd-kube-api-access-fp7rt\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804127 4693 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804136 4693 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-2\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-2\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:00 crc kubenswrapper[4693]: I1122 09:38:00.804146 4693 reconciler_common.go:293] "Volume detached for volume \"ceilometer-compute-config-data-0\" (UniqueName: \"kubernetes.io/secret/021fbba7-764a-4284-a4f1-1b8db668d9fd-ceilometer-compute-config-data-0\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:01 crc kubenswrapper[4693]: I1122 09:38:01.282986 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" event={"ID":"021fbba7-764a-4284-a4f1-1b8db668d9fd","Type":"ContainerDied","Data":"aca89ff758ba14b57d9160c597415dee56affd05c41bb6ddb4865a5693027222"} Nov 22 09:38:01 crc kubenswrapper[4693]: I1122 09:38:01.283025 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz" Nov 22 09:38:01 crc kubenswrapper[4693]: I1122 09:38:01.283033 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aca89ff758ba14b57d9160c597415dee56affd05c41bb6ddb4865a5693027222" Nov 22 09:38:02 crc kubenswrapper[4693]: I1122 09:38:02.156704 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" path="/var/lib/kubelet/pods/342039e5-0014-4b61-8dfc-c2e94edf8195/volumes" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.330282 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:19 crc kubenswrapper[4693]: E1122 09:38:19.330984 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="extract-content" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.330997 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="extract-content" Nov 22 09:38:19 crc kubenswrapper[4693]: E1122 09:38:19.331028 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="021fbba7-764a-4284-a4f1-1b8db668d9fd" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.331035 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="021fbba7-764a-4284-a4f1-1b8db668d9fd" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 09:38:19 crc kubenswrapper[4693]: E1122 09:38:19.331048 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="registry-server" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.331053 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="registry-server" Nov 22 09:38:19 crc kubenswrapper[4693]: E1122 09:38:19.331064 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="extract-utilities" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.331070 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="extract-utilities" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.331244 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="021fbba7-764a-4284-a4f1-1b8db668d9fd" containerName="telemetry-edpm-deployment-openstack-edpm-ipam" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.331253 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="342039e5-0014-4b61-8dfc-c2e94edf8195" containerName="registry-server" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.332323 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.339491 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.388989 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gz8gt\" (UniqueName: \"kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.389075 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.389113 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.491015 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.491340 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.491429 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.491462 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gz8gt\" (UniqueName: \"kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.491905 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.508437 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-gz8gt\" (UniqueName: \"kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt\") pod \"redhat-operators-sl95s\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:19 crc kubenswrapper[4693]: I1122 09:38:19.650979 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:20 crc kubenswrapper[4693]: I1122 09:38:20.081763 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:20 crc kubenswrapper[4693]: I1122 09:38:20.441922 4693 generic.go:334] "Generic (PLEG): container finished" podID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerID="fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3" exitCode=0 Nov 22 09:38:20 crc kubenswrapper[4693]: I1122 09:38:20.442111 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerDied","Data":"fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3"} Nov 22 09:38:20 crc kubenswrapper[4693]: I1122 09:38:20.442289 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerStarted","Data":"458c61599aa73eaa5dc2dced41890caf4304e2f6ff1e194a5bd148c8f6efe0ec"} Nov 22 09:38:21 crc kubenswrapper[4693]: I1122 09:38:21.454815 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerStarted","Data":"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a"} Nov 22 09:38:22 crc kubenswrapper[4693]: I1122 09:38:22.468474 4693 generic.go:334] "Generic (PLEG): container finished" podID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerID="d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a" exitCode=0 Nov 22 09:38:22 crc kubenswrapper[4693]: I1122 09:38:22.468539 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerDied","Data":"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a"} Nov 22 09:38:23 crc kubenswrapper[4693]: I1122 09:38:23.477567 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerStarted","Data":"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946"} Nov 22 09:38:23 crc kubenswrapper[4693]: I1122 09:38:23.499971 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-sl95s" podStartSLOduration=1.9961040479999999 podStartE2EDuration="4.499947511s" podCreationTimestamp="2025-11-22 09:38:19 +0000 UTC" firstStartedPulling="2025-11-22 09:38:20.443756617 +0000 UTC m=+2096.586258909" lastFinishedPulling="2025-11-22 09:38:22.947600081 +0000 UTC m=+2099.090102372" observedRunningTime="2025-11-22 09:38:23.493818878 +0000 UTC m=+2099.636321169" watchObservedRunningTime="2025-11-22 09:38:23.499947511 +0000 UTC m=+2099.642449801" Nov 22 09:38:29 crc kubenswrapper[4693]: I1122 09:38:29.651669 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-sl95s" 
Nov 22 09:38:29 crc kubenswrapper[4693]: I1122 09:38:29.652148 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:29 crc kubenswrapper[4693]: I1122 09:38:29.689428 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:30 crc kubenswrapper[4693]: I1122 09:38:30.247050 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:38:30 crc kubenswrapper[4693]: I1122 09:38:30.247101 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:38:30 crc kubenswrapper[4693]: I1122 09:38:30.574360 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:30 crc kubenswrapper[4693]: I1122 09:38:30.613333 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:32 crc kubenswrapper[4693]: I1122 09:38:32.557586 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-sl95s" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="registry-server" containerID="cri-o://03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946" gracePeriod=2 Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.424145 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.455215 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content\") pod \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.455734 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities\") pod \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.455793 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gz8gt\" (UniqueName: \"kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt\") pod \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\" (UID: \"c9a42769-fcb1-41ef-8af8-57869e9dbfd8\") " Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.456552 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities" (OuterVolumeSpecName: "utilities") pod "c9a42769-fcb1-41ef-8af8-57869e9dbfd8" (UID: "c9a42769-fcb1-41ef-8af8-57869e9dbfd8"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.464341 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt" (OuterVolumeSpecName: "kube-api-access-gz8gt") pod "c9a42769-fcb1-41ef-8af8-57869e9dbfd8" (UID: "c9a42769-fcb1-41ef-8af8-57869e9dbfd8"). InnerVolumeSpecName "kube-api-access-gz8gt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.468921 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.468942 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gz8gt\" (UniqueName: \"kubernetes.io/projected/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-kube-api-access-gz8gt\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.525744 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c9a42769-fcb1-41ef-8af8-57869e9dbfd8" (UID: "c9a42769-fcb1-41ef-8af8-57869e9dbfd8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.568689 4693 generic.go:334] "Generic (PLEG): container finished" podID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerID="03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946" exitCode=0 Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.568749 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerDied","Data":"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946"} Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.568785 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-sl95s" event={"ID":"c9a42769-fcb1-41ef-8af8-57869e9dbfd8","Type":"ContainerDied","Data":"458c61599aa73eaa5dc2dced41890caf4304e2f6ff1e194a5bd148c8f6efe0ec"} Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.568806 4693 scope.go:117] "RemoveContainer" containerID="03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.568807 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-sl95s" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.570565 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c9a42769-fcb1-41ef-8af8-57869e9dbfd8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.602697 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.607806 4693 scope.go:117] "RemoveContainer" containerID="d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.614343 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-sl95s"] Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.636697 4693 scope.go:117] "RemoveContainer" containerID="fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.658818 4693 scope.go:117] "RemoveContainer" containerID="03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946" Nov 22 09:38:33 crc kubenswrapper[4693]: E1122 09:38:33.659190 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946\": container with ID starting with 03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946 not found: ID does not exist" containerID="03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.659224 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946"} err="failed to get container status \"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946\": rpc error: code = NotFound desc = could not find container \"03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946\": container with ID starting with 03d4b79d1b9f6abeb86813b2939edf762f5347a27a4e2638bd3c60afdcbba946 not found: ID does not exist" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.659246 4693 scope.go:117] "RemoveContainer" containerID="d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a" Nov 22 09:38:33 crc kubenswrapper[4693]: E1122 09:38:33.659590 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a\": container with ID starting with d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a not found: ID does not exist" containerID="d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.659631 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a"} err="failed to get container status \"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a\": rpc error: code = NotFound desc = could not find container \"d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a\": container with ID starting with d1d50dd1f479ec34161530fe2cb7d4afbc063db81b313371024ddd60f48bfe7a not found: ID does not exist" Nov 22 09:38:33 crc 
kubenswrapper[4693]: I1122 09:38:33.659654 4693 scope.go:117] "RemoveContainer" containerID="fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3" Nov 22 09:38:33 crc kubenswrapper[4693]: E1122 09:38:33.659971 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3\": container with ID starting with fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3 not found: ID does not exist" containerID="fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3" Nov 22 09:38:33 crc kubenswrapper[4693]: I1122 09:38:33.659995 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3"} err="failed to get container status \"fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3\": rpc error: code = NotFound desc = could not find container \"fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3\": container with ID starting with fad3e58b39f98a529d70a5cdd1c68896be7957a04385d2f2f85aca9c4a29c6b3 not found: ID does not exist" Nov 22 09:38:34 crc kubenswrapper[4693]: I1122 09:38:34.156525 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" path="/var/lib/kubelet/pods/c9a42769-fcb1-41ef-8af8-57869e9dbfd8/volumes" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.894318 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/tempest-tests-tempest"] Nov 22 09:38:51 crc kubenswrapper[4693]: E1122 09:38:51.895543 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="extract-utilities" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.895559 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="extract-utilities" Nov 22 09:38:51 crc kubenswrapper[4693]: E1122 09:38:51.895643 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="registry-server" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.895655 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="registry-server" Nov 22 09:38:51 crc kubenswrapper[4693]: E1122 09:38:51.895674 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="extract-content" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.895681 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="extract-content" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.895930 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="c9a42769-fcb1-41ef-8af8-57869e9dbfd8" containerName="registry-server" Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.897038 4693 util.go:30] "No sandbox for pod can be found. 
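
The "ContainerStatus from runtime service failed ... NotFound" errors above are a benign race: by the time the kubelet re-queries CRI-O for the containers it just removed, they are already gone, so DeleteContainer only has stale IDs to report. A minimal Go sketch of the usual tolerance for that gRPC NotFound status (illustrative only, not the kubelet's actual code):

```go
// Sketch: treating gRPC NotFound from a CRI call as "already removed",
// which is why the NotFound errors above are harmless during cleanup.
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// removeIfPresent ignores NotFound: by the time the cleanup loop
// retries, the runtime may already have pruned the container.
func removeIfPresent(remove func(id string) error, id string) error {
	err := remove(id)
	if err != nil && status.Code(err) != codes.NotFound {
		return fmt.Errorf("remove container %s: %w", id, err)
	}
	return nil
}

func main() {
	gone := func(string) error {
		return status.Error(codes.NotFound, "could not find container")
	}
	fmt.Println(removeIfPresent(gone, "03d4b79d")) // <nil>: treated as success
}
```
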
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.899651 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"test-operator-controller-priv-key"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.900124 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-96k5n"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.900374 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-custom-data-s0"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.900764 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.902431 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.907363 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.907431 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:51 crc kubenswrapper[4693]: I1122 09:38:51.907501 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.009716 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.009853 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.009921 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010184 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010223 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010277 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010307 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010329 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnt2m\" (UniqueName: \"kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010358 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.010961 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.011761 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.017007 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.112957 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113041 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113087 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnt2m\" (UniqueName: \"kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113117 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113230 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113271 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113581 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113625 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.113752 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.116955 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.117885 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.128294 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnt2m\" (UniqueName: \"kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.136299 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"tempest-tests-tempest\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.215797 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest"
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.597674 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/tempest-tests-tempest"]
Nov 22 09:38:52 crc kubenswrapper[4693]: I1122 09:38:52.715090 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"fe7aa61b-ddcb-48b3-9d95-c203790e13e5","Type":"ContainerStarted","Data":"76d097c17b6e61ce7c6e831a96027c38304e405fa8370cafebe044c16f674e93"}
Nov 22 09:39:00 crc kubenswrapper[4693]: I1122 09:39:00.248191 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:39:00 crc kubenswrapper[4693]: I1122 09:39:00.249023 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:39:15 crc kubenswrapper[4693]: E1122 09:39:15.313176 4693 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified"
Nov 22 09:39:15 crc kubenswrapper[4693]: E1122 09:39:15.314190 4693 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:tempest-tests-tempest-tests-runner,Image:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,Command:[],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:test-operator-ephemeral-workdir,ReadOnly:false,MountPath:/var/lib/tempest,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-ephemeral-temporary,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/etc/test_operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:test-operator-logs,ReadOnly:false,MountPath:/var/lib/tempest/external_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/etc/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config,ReadOnly:true,MountPath:/var/lib/tempest/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/etc/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ca-certs,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ssh-key,ReadOnly:false,MountPath:/var/lib/tempest/id_ecdsa,SubPath:ssh_key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hnt2m,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42480,RunAsNonRoot:*false,ReadOnlyRootFilesystem:*false,AllowPrivilegeEscalation:*true,RunAsGroup:*42480,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-custom-data-s0,},Optional:nil,},SecretRef:nil,},EnvFromSource{Prefix:,ConfigMapRef:&ConfigMapEnvSource{LocalObjectReference:LocalObjectReference{Name:tempest-tests-tempest-env-vars-s0,},Optional:nil,},SecretRef:nil,},},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod tempest-tests-tempest_openstack(fe7aa61b-ddcb-48b3-9d95-c203790e13e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Nov 22 09:39:15 crc kubenswrapper[4693]: E1122 09:39:15.315456 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/tempest-tests-tempest" podUID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5"
Nov 22 09:39:15 crc kubenswrapper[4693]: E1122 09:39:15.988878 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"tempest-tests-tempest-tests-runner\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified\\\"\"" pod="openstack/tempest-tests-tempest" podUID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5"
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.246046 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.246664 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.246727 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r"
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.247489 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.247552 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" gracePeriod=600
Nov 22 09:39:30 crc kubenswrapper[4693]: E1122 09:39:30.303725 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7007d901_fc52_4723_a949_db71619b3305.slice/crio-conmon-d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7007d901_fc52_4723_a949_db71619b3305.slice/crio-d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25.scope\": RecentStats: unable to find data in memory cache]"
Nov 22 09:39:30 crc kubenswrapper[4693]: E1122 09:39:30.362487 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305"
Nov 22 09:39:30 crc kubenswrapper[4693]: I1122 09:39:30.645823 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"tempest-tests-tempest-env-vars-s0"
object-"openstack"/"tempest-tests-tempest-env-vars-s0" Nov 22 09:39:31 crc kubenswrapper[4693]: I1122 09:39:31.111168 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" exitCode=0 Nov 22 09:39:31 crc kubenswrapper[4693]: I1122 09:39:31.111232 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25"} Nov 22 09:39:31 crc kubenswrapper[4693]: I1122 09:39:31.111540 4693 scope.go:117] "RemoveContainer" containerID="87e5a0a229137bf1f778e2fd090c6a9d8030458f6db183eab75ce472c14ee171" Nov 22 09:39:31 crc kubenswrapper[4693]: I1122 09:39:31.113255 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:39:31 crc kubenswrapper[4693]: E1122 09:39:31.113878 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:39:32 crc kubenswrapper[4693]: I1122 09:39:32.123271 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"fe7aa61b-ddcb-48b3-9d95-c203790e13e5","Type":"ContainerStarted","Data":"64a982a9ddd8480a85f210190ec778db024fc59067d797a36346722b1ca493ae"} Nov 22 09:39:32 crc kubenswrapper[4693]: I1122 09:39:32.148955 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/tempest-tests-tempest" podStartSLOduration=4.110521457 podStartE2EDuration="42.148920883s" podCreationTimestamp="2025-11-22 09:38:50 +0000 UTC" firstStartedPulling="2025-11-22 09:38:52.604627167 +0000 UTC m=+2128.747129458" lastFinishedPulling="2025-11-22 09:39:30.643026593 +0000 UTC m=+2166.785528884" observedRunningTime="2025-11-22 09:39:32.138209844 +0000 UTC m=+2168.280712135" watchObservedRunningTime="2025-11-22 09:39:32.148920883 +0000 UTC m=+2168.291423164" Nov 22 09:39:44 crc kubenswrapper[4693]: I1122 09:39:44.152492 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:39:44 crc kubenswrapper[4693]: E1122 09:39:44.153382 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:39:58 crc kubenswrapper[4693]: I1122 09:39:58.146763 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:39:58 crc kubenswrapper[4693]: E1122 09:39:58.147369 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:40:13 crc kubenswrapper[4693]: I1122 09:40:13.146484 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:40:13 crc kubenswrapper[4693]: E1122 09:40:13.147455 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:40:28 crc kubenswrapper[4693]: I1122 09:40:28.147414 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:40:28 crc kubenswrapper[4693]: E1122 09:40:28.148245 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:40:41 crc kubenswrapper[4693]: I1122 09:40:41.147645 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:40:41 crc kubenswrapper[4693]: E1122 09:40:41.148774 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:40:55 crc kubenswrapper[4693]: I1122 09:40:55.147360 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:40:55 crc kubenswrapper[4693]: E1122 09:40:55.148408 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:41:10 crc kubenswrapper[4693]: I1122 09:41:10.147144 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:41:10 crc kubenswrapper[4693]: E1122 09:41:10.148077 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" 
podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:41:25 crc kubenswrapper[4693]: I1122 09:41:25.148582 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:41:25 crc kubenswrapper[4693]: E1122 09:41:25.149813 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:41:39 crc kubenswrapper[4693]: I1122 09:41:39.147285 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:41:39 crc kubenswrapper[4693]: E1122 09:41:39.148671 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:41:50 crc kubenswrapper[4693]: I1122 09:41:50.147326 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:41:50 crc kubenswrapper[4693]: E1122 09:41:50.148132 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:42:04 crc kubenswrapper[4693]: I1122 09:42:04.152165 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:42:04 crc kubenswrapper[4693]: E1122 09:42:04.152971 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:42:17 crc kubenswrapper[4693]: I1122 09:42:17.146756 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:42:17 crc kubenswrapper[4693]: E1122 09:42:17.147743 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:42:31 crc kubenswrapper[4693]: I1122 09:42:31.146935 4693 scope.go:117] "RemoveContainer" 
containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:42:31 crc kubenswrapper[4693]: E1122 09:42:31.147779 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:42:45 crc kubenswrapper[4693]: I1122 09:42:45.146771 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:42:45 crc kubenswrapper[4693]: E1122 09:42:45.147743 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:43:00 crc kubenswrapper[4693]: I1122 09:43:00.146926 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:43:00 crc kubenswrapper[4693]: E1122 09:43:00.147862 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:43:11 crc kubenswrapper[4693]: I1122 09:43:11.147339 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:43:11 crc kubenswrapper[4693]: E1122 09:43:11.148201 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:43:24 crc kubenswrapper[4693]: I1122 09:43:24.172000 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:43:24 crc kubenswrapper[4693]: E1122 09:43:24.173542 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:43:37 crc kubenswrapper[4693]: I1122 09:43:37.146521 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:43:37 crc kubenswrapper[4693]: E1122 09:43:37.147143 4693 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:43:50 crc kubenswrapper[4693]: I1122 09:43:50.147201 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:43:50 crc kubenswrapper[4693]: E1122 09:43:50.148131 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:44:05 crc kubenswrapper[4693]: I1122 09:44:05.146540 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:44:05 crc kubenswrapper[4693]: E1122 09:44:05.147625 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:44:16 crc kubenswrapper[4693]: I1122 09:44:16.147202 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:44:16 crc kubenswrapper[4693]: E1122 09:44:16.148029 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:44:30 crc kubenswrapper[4693]: I1122 09:44:30.147831 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:44:30 crc kubenswrapper[4693]: E1122 09:44:30.148866 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:44:42 crc kubenswrapper[4693]: I1122 09:44:42.146319 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:44:42 crc kubenswrapper[4693]: I1122 09:44:42.595298 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" 
event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c"} Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.143693 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672"] Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.145206 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.156184 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.157263 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.186575 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672"] Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.286348 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.286414 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tsqcw\" (UniqueName: \"kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.286447 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.387471 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.387606 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.387648 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tsqcw\" (UniqueName: 
\"kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.388727 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.395022 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.404117 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tsqcw\" (UniqueName: \"kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw\") pod \"collect-profiles-29396745-sp672\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.488956 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:00 crc kubenswrapper[4693]: I1122 09:45:00.896628 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672"] Nov 22 09:45:01 crc kubenswrapper[4693]: I1122 09:45:01.749365 4693 generic.go:334] "Generic (PLEG): container finished" podID="cdab8a95-3c9a-48ed-8309-b0e44ec86792" containerID="df0c7f36a7025826ed22753fcc2189f7ebe4dfe2fb285e80c2ecdab61e8a6f7a" exitCode=0 Nov 22 09:45:01 crc kubenswrapper[4693]: I1122 09:45:01.749427 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" event={"ID":"cdab8a95-3c9a-48ed-8309-b0e44ec86792","Type":"ContainerDied","Data":"df0c7f36a7025826ed22753fcc2189f7ebe4dfe2fb285e80c2ecdab61e8a6f7a"} Nov 22 09:45:01 crc kubenswrapper[4693]: I1122 09:45:01.749665 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" event={"ID":"cdab8a95-3c9a-48ed-8309-b0e44ec86792","Type":"ContainerStarted","Data":"6c2948bcbee2fbaa419bb6298a35639cc01c4a498f71e025456e540cf47ade18"} Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.036944 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.239736 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume\") pod \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.240026 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tsqcw\" (UniqueName: \"kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw\") pod \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.240282 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume\") pod \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\" (UID: \"cdab8a95-3c9a-48ed-8309-b0e44ec86792\") " Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.240945 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume" (OuterVolumeSpecName: "config-volume") pod "cdab8a95-3c9a-48ed-8309-b0e44ec86792" (UID: "cdab8a95-3c9a-48ed-8309-b0e44ec86792"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.241609 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/cdab8a95-3c9a-48ed-8309-b0e44ec86792-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.246969 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw" (OuterVolumeSpecName: "kube-api-access-tsqcw") pod "cdab8a95-3c9a-48ed-8309-b0e44ec86792" (UID: "cdab8a95-3c9a-48ed-8309-b0e44ec86792"). InnerVolumeSpecName "kube-api-access-tsqcw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.247374 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "cdab8a95-3c9a-48ed-8309-b0e44ec86792" (UID: "cdab8a95-3c9a-48ed-8309-b0e44ec86792"). InnerVolumeSpecName "secret-volume". 
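
The collect-profiles pod above is a run of OLM's collect-profiles CronJob; a CronJob names each Job after its scheduled time in minutes since the Unix epoch, so the 29396745 suffix decodes exactly to the 09:45:00 timestamps in these records. A quick check in Go:

```go
// Decoding the Job-name suffix of collect-profiles-29396745-sp672:
// minutes since the Unix epoch -> scheduled wall-clock time.
package main

import (
	"fmt"
	"time"
)

func main() {
	const scheduledMinutes = 29396745
	t := time.Unix(scheduledMinutes*60, 0).UTC()
	fmt.Println(t) // 2025-11-22 09:45:00 +0000 UTC
}
```
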
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.344306 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tsqcw\" (UniqueName: \"kubernetes.io/projected/cdab8a95-3c9a-48ed-8309-b0e44ec86792-kube-api-access-tsqcw\") on node \"crc\" DevicePath \"\"" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.344340 4693 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/cdab8a95-3c9a-48ed-8309-b0e44ec86792-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.769809 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" event={"ID":"cdab8a95-3c9a-48ed-8309-b0e44ec86792","Type":"ContainerDied","Data":"6c2948bcbee2fbaa419bb6298a35639cc01c4a498f71e025456e540cf47ade18"} Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.769887 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c2948bcbee2fbaa419bb6298a35639cc01c4a498f71e025456e540cf47ade18" Nov 22 09:45:03 crc kubenswrapper[4693]: I1122 09:45:03.769896 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396745-sp672" Nov 22 09:45:04 crc kubenswrapper[4693]: I1122 09:45:04.136499 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"] Nov 22 09:45:04 crc kubenswrapper[4693]: I1122 09:45:04.142365 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396700-nqzbf"] Nov 22 09:45:04 crc kubenswrapper[4693]: I1122 09:45:04.171299 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ec0a6f6-e561-4b7d-89db-7bb147f8f21e" path="/var/lib/kubelet/pods/3ec0a6f6-e561-4b7d-89db-7bb147f8f21e/volumes" Nov 22 09:45:33 crc kubenswrapper[4693]: I1122 09:45:33.253251 4693 scope.go:117] "RemoveContainer" containerID="3a453d3f49527e8f6c4160ce23e02c379403b95bfc685b6865e6b91a0604fba8" Nov 22 09:47:00 crc kubenswrapper[4693]: I1122 09:47:00.246271 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:47:00 crc kubenswrapper[4693]: I1122 09:47:00.246833 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.382598 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:21 crc kubenswrapper[4693]: E1122 09:47:21.385358 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cdab8a95-3c9a-48ed-8309-b0e44ec86792" containerName="collect-profiles" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.385445 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="cdab8a95-3c9a-48ed-8309-b0e44ec86792" containerName="collect-profiles" Nov 22 09:47:21 crc kubenswrapper[4693]: 
I1122 09:47:21.385777 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="cdab8a95-3c9a-48ed-8309-b0e44ec86792" containerName="collect-profiles" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.388039 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.395141 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.579334 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.579480 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.579548 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zctpd\" (UniqueName: \"kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.680988 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.681068 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.681131 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zctpd\" (UniqueName: \"kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.681519 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.681527 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.703757 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zctpd\" (UniqueName: \"kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd\") pod \"certified-operators-75n5s\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:21 crc kubenswrapper[4693]: I1122 09:47:21.709869 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:22 crc kubenswrapper[4693]: I1122 09:47:22.156100 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:22 crc kubenswrapper[4693]: W1122 09:47:22.159736 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf3287d8_31b3_468b_941b_d8724b55980a.slice/crio-175ae2c1f8a18b6cbe081b8def944744a1e3dfeb63e9012821e582bac72e7380 WatchSource:0}: Error finding container 175ae2c1f8a18b6cbe081b8def944744a1e3dfeb63e9012821e582bac72e7380: Status 404 returned error can't find the container with id 175ae2c1f8a18b6cbe081b8def944744a1e3dfeb63e9012821e582bac72e7380 Nov 22 09:47:23 crc kubenswrapper[4693]: I1122 09:47:23.057712 4693 generic.go:334] "Generic (PLEG): container finished" podID="df3287d8-31b3-468b-941b-d8724b55980a" containerID="e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e" exitCode=0 Nov 22 09:47:23 crc kubenswrapper[4693]: I1122 09:47:23.058033 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerDied","Data":"e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e"} Nov 22 09:47:23 crc kubenswrapper[4693]: I1122 09:47:23.058067 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerStarted","Data":"175ae2c1f8a18b6cbe081b8def944744a1e3dfeb63e9012821e582bac72e7380"} Nov 22 09:47:23 crc kubenswrapper[4693]: I1122 09:47:23.060427 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:47:24 crc kubenswrapper[4693]: I1122 09:47:24.073521 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerStarted","Data":"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc"} Nov 22 09:47:25 crc kubenswrapper[4693]: I1122 09:47:25.085040 4693 generic.go:334] "Generic (PLEG): container finished" podID="df3287d8-31b3-468b-941b-d8724b55980a" containerID="61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc" exitCode=0 Nov 22 09:47:25 crc kubenswrapper[4693]: I1122 09:47:25.085105 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerDied","Data":"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc"} Nov 22 09:47:26 crc kubenswrapper[4693]: I1122 09:47:26.107212 4693 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerStarted","Data":"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a"} Nov 22 09:47:26 crc kubenswrapper[4693]: I1122 09:47:26.122933 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-75n5s" podStartSLOduration=2.493149715 podStartE2EDuration="5.122914068s" podCreationTimestamp="2025-11-22 09:47:21 +0000 UTC" firstStartedPulling="2025-11-22 09:47:23.059903788 +0000 UTC m=+2639.202406078" lastFinishedPulling="2025-11-22 09:47:25.68966814 +0000 UTC m=+2641.832170431" observedRunningTime="2025-11-22 09:47:26.121115056 +0000 UTC m=+2642.263617346" watchObservedRunningTime="2025-11-22 09:47:26.122914068 +0000 UTC m=+2642.265416360" Nov 22 09:47:30 crc kubenswrapper[4693]: I1122 09:47:30.246651 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:47:30 crc kubenswrapper[4693]: I1122 09:47:30.247204 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:47:31 crc kubenswrapper[4693]: I1122 09:47:31.710651 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:31 crc kubenswrapper[4693]: I1122 09:47:31.712397 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:31 crc kubenswrapper[4693]: I1122 09:47:31.761111 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:32 crc kubenswrapper[4693]: I1122 09:47:32.224732 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:32 crc kubenswrapper[4693]: I1122 09:47:32.264938 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.198195 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-75n5s" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="registry-server" containerID="cri-o://f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a" gracePeriod=2 Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.651978 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.759168 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zctpd\" (UniqueName: \"kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd\") pod \"df3287d8-31b3-468b-941b-d8724b55980a\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.759618 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities\") pod \"df3287d8-31b3-468b-941b-d8724b55980a\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.759759 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content\") pod \"df3287d8-31b3-468b-941b-d8724b55980a\" (UID: \"df3287d8-31b3-468b-941b-d8724b55980a\") " Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.760150 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities" (OuterVolumeSpecName: "utilities") pod "df3287d8-31b3-468b-941b-d8724b55980a" (UID: "df3287d8-31b3-468b-941b-d8724b55980a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.761270 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.768730 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd" (OuterVolumeSpecName: "kube-api-access-zctpd") pod "df3287d8-31b3-468b-941b-d8724b55980a" (UID: "df3287d8-31b3-468b-941b-d8724b55980a"). InnerVolumeSpecName "kube-api-access-zctpd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.798281 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df3287d8-31b3-468b-941b-d8724b55980a" (UID: "df3287d8-31b3-468b-941b-d8724b55980a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.863675 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zctpd\" (UniqueName: \"kubernetes.io/projected/df3287d8-31b3-468b-941b-d8724b55980a-kube-api-access-zctpd\") on node \"crc\" DevicePath \"\"" Nov 22 09:47:34 crc kubenswrapper[4693]: I1122 09:47:34.863714 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df3287d8-31b3-468b-941b-d8724b55980a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.211256 4693 generic.go:334] "Generic (PLEG): container finished" podID="df3287d8-31b3-468b-941b-d8724b55980a" containerID="f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a" exitCode=0 Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.211316 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerDied","Data":"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a"} Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.211339 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-75n5s" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.211366 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-75n5s" event={"ID":"df3287d8-31b3-468b-941b-d8724b55980a","Type":"ContainerDied","Data":"175ae2c1f8a18b6cbe081b8def944744a1e3dfeb63e9012821e582bac72e7380"} Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.211395 4693 scope.go:117] "RemoveContainer" containerID="f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.242881 4693 scope.go:117] "RemoveContainer" containerID="61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.246681 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.257071 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-75n5s"] Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.278483 4693 scope.go:117] "RemoveContainer" containerID="e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.304959 4693 scope.go:117] "RemoveContainer" containerID="f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a" Nov 22 09:47:35 crc kubenswrapper[4693]: E1122 09:47:35.305429 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a\": container with ID starting with f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a not found: ID does not exist" containerID="f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.305458 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a"} err="failed to get container status 
\"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a\": rpc error: code = NotFound desc = could not find container \"f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a\": container with ID starting with f7b20f7df2ef45c50ad3e45437b37e1de70ad623bf48da1d0c117b6d2b7c5d4a not found: ID does not exist" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.305483 4693 scope.go:117] "RemoveContainer" containerID="61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc" Nov 22 09:47:35 crc kubenswrapper[4693]: E1122 09:47:35.305763 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc\": container with ID starting with 61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc not found: ID does not exist" containerID="61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.305785 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc"} err="failed to get container status \"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc\": rpc error: code = NotFound desc = could not find container \"61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc\": container with ID starting with 61c4962ae3a253a736920e4f5bfb377c711de822b6312020c8072b72b256b6bc not found: ID does not exist" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.305799 4693 scope.go:117] "RemoveContainer" containerID="e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e" Nov 22 09:47:35 crc kubenswrapper[4693]: E1122 09:47:35.306125 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e\": container with ID starting with e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e not found: ID does not exist" containerID="e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e" Nov 22 09:47:35 crc kubenswrapper[4693]: I1122 09:47:35.306147 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e"} err="failed to get container status \"e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e\": rpc error: code = NotFound desc = could not find container \"e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e\": container with ID starting with e37f87c49f1ea76751bb4dc4b42c40d88202d8d284ebd5e29d143ed9e3101f1e not found: ID does not exist" Nov 22 09:47:36 crc kubenswrapper[4693]: I1122 09:47:36.158502 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df3287d8-31b3-468b-941b-d8724b55980a" path="/var/lib/kubelet/pods/df3287d8-31b3-468b-941b-d8724b55980a/volumes" Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.246192 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.246689 4693 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.246739 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.247592 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.247638 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c" gracePeriod=600 Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.450417 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c" exitCode=0 Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.450514 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c"} Nov 22 09:48:00 crc kubenswrapper[4693]: I1122 09:48:00.450745 4693 scope.go:117] "RemoveContainer" containerID="d5dd8559ac6dd007dd3991ca994f6e2ca3e88f62234a76743049577c251c0b25" Nov 22 09:48:01 crc kubenswrapper[4693]: I1122 09:48:01.462188 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba"} Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.652143 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:15 crc kubenswrapper[4693]: E1122 09:48:15.653615 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="extract-content" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.653635 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="extract-content" Nov 22 09:48:15 crc kubenswrapper[4693]: E1122 09:48:15.653679 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="registry-server" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.653687 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="registry-server" Nov 22 09:48:15 crc kubenswrapper[4693]: E1122 09:48:15.653704 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df3287d8-31b3-468b-941b-d8724b55980a" 
containerName="extract-utilities" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.653712 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="extract-utilities" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.654030 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="df3287d8-31b3-468b-941b-d8724b55980a" containerName="registry-server" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.656351 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.669804 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.688246 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.688446 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w2p8n\" (UniqueName: \"kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.688655 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.791278 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.791451 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.791516 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w2p8n\" (UniqueName: \"kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.791970 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content\") pod \"community-operators-mpfdd\" (UID: 
\"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.792063 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.811581 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w2p8n\" (UniqueName: \"kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n\") pod \"community-operators-mpfdd\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:15 crc kubenswrapper[4693]: I1122 09:48:15.978136 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:16 crc kubenswrapper[4693]: I1122 09:48:16.446277 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:16 crc kubenswrapper[4693]: I1122 09:48:16.595699 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerStarted","Data":"39445d1a873ba13d0d5446658fa5d4b41d7ea70af561feaea0bc093bf703ef93"} Nov 22 09:48:17 crc kubenswrapper[4693]: I1122 09:48:17.604113 4693 generic.go:334] "Generic (PLEG): container finished" podID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerID="c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5" exitCode=0 Nov 22 09:48:17 crc kubenswrapper[4693]: I1122 09:48:17.604161 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerDied","Data":"c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5"} Nov 22 09:48:18 crc kubenswrapper[4693]: I1122 09:48:18.616358 4693 generic.go:334] "Generic (PLEG): container finished" podID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerID="18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92" exitCode=0 Nov 22 09:48:18 crc kubenswrapper[4693]: I1122 09:48:18.616477 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerDied","Data":"18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92"} Nov 22 09:48:19 crc kubenswrapper[4693]: I1122 09:48:19.643466 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerStarted","Data":"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a"} Nov 22 09:48:19 crc kubenswrapper[4693]: I1122 09:48:19.666564 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-mpfdd" podStartSLOduration=3.212784448 podStartE2EDuration="4.666545329s" podCreationTimestamp="2025-11-22 09:48:15 +0000 UTC" firstStartedPulling="2025-11-22 09:48:17.605718087 +0000 UTC m=+2693.748220378" lastFinishedPulling="2025-11-22 09:48:19.059478968 +0000 UTC m=+2695.201981259" observedRunningTime="2025-11-22 
09:48:19.656290575 +0000 UTC m=+2695.798792867" watchObservedRunningTime="2025-11-22 09:48:19.666545329 +0000 UTC m=+2695.809047621" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.423124 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.425693 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.429115 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.471009 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpst2\" (UniqueName: \"kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.471407 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.471558 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.574896 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.575029 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.575081 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpst2\" (UniqueName: \"kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.576230 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.576479 4693 
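The startup-latency record above encodes a simple relation: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration additionally excludes the image-pull window (lastFinishedPulling minus firstStartedPulling). Checking the numbers for community-operators-mpfdd:

package main

import "fmt"

func main() {
	// Offsets in seconds from podCreationTimestamp (09:48:15), per the record:
	e2e := 4.666545329       // watchObservedRunningTime (09:48:19.666545329)
	pullStart := 2.605718087 // firstStartedPulling      (09:48:17.605718087)
	pullEnd := 4.059478968   // lastFinishedPulling      (09:48:19.059478968)

	// SLO duration = E2E duration minus time spent pulling images.
	fmt.Printf("podStartSLOduration ≈ %.9f\n", e2e-(pullEnd-pullStart))
	// Prints 3.212784448, matching the logged value.
}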
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.595938 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpst2\" (UniqueName: \"kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2\") pod \"redhat-marketplace-z4kwx\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.693208 4693 generic.go:334] "Generic (PLEG): container finished" podID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5" containerID="64a982a9ddd8480a85f210190ec778db024fc59067d797a36346722b1ca493ae" exitCode=0 Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.693294 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/tempest-tests-tempest" event={"ID":"fe7aa61b-ddcb-48b3-9d95-c203790e13e5","Type":"ContainerDied","Data":"64a982a9ddd8480a85f210190ec778db024fc59067d797a36346722b1ca493ae"} Nov 22 09:48:24 crc kubenswrapper[4693]: I1122 09:48:24.769598 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.211487 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.705345 4693 generic.go:334] "Generic (PLEG): container finished" podID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerID="ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3" exitCode=0 Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.705407 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerDied","Data":"ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3"} Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.705706 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerStarted","Data":"5eb90202fb9987b471c544b86a701d815b69a19da29c32ecc425755158e50d45"} Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.978488 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:25 crc kubenswrapper[4693]: I1122 09:48:25.978784 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.017938 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.023564 4693 util.go:48] "No ready sandbox for pod can be found. 
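Each catalog pod above goes through the same volume sequence: VerifyControllerAttachedVolume, then MountVolume started, then SetUp succeeded, for two emptyDir volumes ("utilities", "catalog-content") plus the projected service-account token that appears as kube-api-access-<suffix> without being declared by the user. The declared part, sketched in k8s.io/api/core/v1 types:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	vols := []corev1.Volume{
		{Name: "utilities", VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{}}},
		{Name: "catalog-content", VolumeSource: corev1.VolumeSource{
			EmptyDir: &corev1.EmptyDirVolumeSource{}}},
	}
	fmt.Printf("%+v\n", vols)
}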
Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106494 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106547 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106588 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106706 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106723 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnt2m\" (UniqueName: \"kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106772 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106805 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106864 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"test-operator-logs\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.106887 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data\") pod \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\" (UID: \"fe7aa61b-ddcb-48b3-9d95-c203790e13e5\") " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.107706 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary" (OuterVolumeSpecName: 
"test-operator-ephemeral-temporary") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "test-operator-ephemeral-temporary". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.108280 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data" (OuterVolumeSpecName: "config-data") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.112392 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "test-operator-logs") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "local-storage04-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.113644 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir" (OuterVolumeSpecName: "test-operator-ephemeral-workdir") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "test-operator-ephemeral-workdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.113859 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m" (OuterVolumeSpecName: "kube-api-access-hnt2m") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "kube-api-access-hnt2m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.131802 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.134345 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs" (OuterVolumeSpecName: "ca-certs") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "ca-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.134738 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.147546 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config" (OuterVolumeSpecName: "openstack-config") pod "fe7aa61b-ddcb-48b3-9d95-c203790e13e5" (UID: "fe7aa61b-ddcb-48b3-9d95-c203790e13e5"). InnerVolumeSpecName "openstack-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209022 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnt2m\" (UniqueName: \"kubernetes.io/projected/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-kube-api-access-hnt2m\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209052 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209064 4693 reconciler_common.go:293] "Volume detached for volume \"ca-certs\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ca-certs\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209086 4693 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-workdir\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-workdir\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209114 4693 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209127 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-config-data\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209138 4693 reconciler_common.go:293] "Volume detached for volume \"test-operator-ephemeral-temporary\" (UniqueName: \"kubernetes.io/empty-dir/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-test-operator-ephemeral-temporary\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209154 4693 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-openstack-config\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.209168 4693 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fe7aa61b-ddcb-48b3-9d95-c203790e13e5-ssh-key\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.227629 4693 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.311100 4693 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.720752 4693 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/tempest-tests-tempest" event={"ID":"fe7aa61b-ddcb-48b3-9d95-c203790e13e5","Type":"ContainerDied","Data":"76d097c17b6e61ce7c6e831a96027c38304e405fa8370cafebe044c16f674e93"} Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.721168 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="76d097c17b6e61ce7c6e831a96027c38304e405fa8370cafebe044c16f674e93" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.720864 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/tempest-tests-tempest" Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.724193 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerStarted","Data":"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03"} Nov 22 09:48:26 crc kubenswrapper[4693]: I1122 09:48:26.764768 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:27 crc kubenswrapper[4693]: I1122 09:48:27.736264 4693 generic.go:334] "Generic (PLEG): container finished" podID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerID="417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03" exitCode=0 Nov 22 09:48:27 crc kubenswrapper[4693]: I1122 09:48:27.736395 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerDied","Data":"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03"} Nov 22 09:48:28 crc kubenswrapper[4693]: I1122 09:48:28.396626 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:28 crc kubenswrapper[4693]: I1122 09:48:28.752338 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerStarted","Data":"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e"} Nov 22 09:48:28 crc kubenswrapper[4693]: I1122 09:48:28.752556 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-mpfdd" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="registry-server" containerID="cri-o://1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a" gracePeriod=2 Nov 22 09:48:28 crc kubenswrapper[4693]: I1122 09:48:28.782638 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-z4kwx" podStartSLOduration=2.279085069 podStartE2EDuration="4.782613229s" podCreationTimestamp="2025-11-22 09:48:24 +0000 UTC" firstStartedPulling="2025-11-22 09:48:25.706986103 +0000 UTC m=+2701.849488394" lastFinishedPulling="2025-11-22 09:48:28.210514273 +0000 UTC m=+2704.353016554" observedRunningTime="2025-11-22 09:48:28.777682888 +0000 UTC m=+2704.920185180" watchObservedRunningTime="2025-11-22 09:48:28.782613229 +0000 UTC m=+2704.925115520" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.180973 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.367024 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities\") pod \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.367346 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content\") pod \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.367633 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w2p8n\" (UniqueName: \"kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n\") pod \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\" (UID: \"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7\") " Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.367770 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities" (OuterVolumeSpecName: "utilities") pod "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" (UID: "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.368323 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.376054 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n" (OuterVolumeSpecName: "kube-api-access-w2p8n") pod "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" (UID: "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7"). InnerVolumeSpecName "kube-api-access-w2p8n". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.410260 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" (UID: "fbb44ad6-cb19-4435-a4cf-dca69cf66fc7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.471237 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w2p8n\" (UniqueName: \"kubernetes.io/projected/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-kube-api-access-w2p8n\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.471275 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.765669 4693 generic.go:334] "Generic (PLEG): container finished" podID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerID="1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a" exitCode=0 Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.765770 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-mpfdd" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.765820 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerDied","Data":"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a"} Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.765921 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-mpfdd" event={"ID":"fbb44ad6-cb19-4435-a4cf-dca69cf66fc7","Type":"ContainerDied","Data":"39445d1a873ba13d0d5446658fa5d4b41d7ea70af561feaea0bc093bf703ef93"} Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.765947 4693 scope.go:117] "RemoveContainer" containerID="1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.792277 4693 scope.go:117] "RemoveContainer" containerID="18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.795330 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.803221 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-mpfdd"] Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.810039 4693 scope.go:117] "RemoveContainer" containerID="c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.841606 4693 scope.go:117] "RemoveContainer" containerID="1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a" Nov 22 09:48:29 crc kubenswrapper[4693]: E1122 09:48:29.842584 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a\": container with ID starting with 1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a not found: ID does not exist" containerID="1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.842623 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a"} err="failed to get container status 
\"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a\": rpc error: code = NotFound desc = could not find container \"1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a\": container with ID starting with 1b3ec7c89ddc84fc5de538952301eda826caf530ba6c2e7e1b853f4e5d47478a not found: ID does not exist" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.842668 4693 scope.go:117] "RemoveContainer" containerID="18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92" Nov 22 09:48:29 crc kubenswrapper[4693]: E1122 09:48:29.843133 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92\": container with ID starting with 18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92 not found: ID does not exist" containerID="18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.843152 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92"} err="failed to get container status \"18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92\": rpc error: code = NotFound desc = could not find container \"18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92\": container with ID starting with 18e0950ea21619a42da46e1fda7ccff84998137aa15bca00c2859a8e4f353b92 not found: ID does not exist" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.843181 4693 scope.go:117] "RemoveContainer" containerID="c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5" Nov 22 09:48:29 crc kubenswrapper[4693]: E1122 09:48:29.843388 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5\": container with ID starting with c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5 not found: ID does not exist" containerID="c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5" Nov 22 09:48:29 crc kubenswrapper[4693]: I1122 09:48:29.843420 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5"} err="failed to get container status \"c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5\": rpc error: code = NotFound desc = could not find container \"c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5\": container with ID starting with c68d8bb772acd084cf56cd5b43f4cb9e806db0d4570de443941a3a3c254c52d5 not found: ID does not exist" Nov 22 09:48:30 crc kubenswrapper[4693]: I1122 09:48:30.161464 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" path="/var/lib/kubelet/pods/fbb44ad6-cb19-4435-a4cf-dca69cf66fc7/volumes" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.203529 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 09:48:34 crc kubenswrapper[4693]: E1122 09:48:34.216005 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="registry-server" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216046 4693 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="registry-server" Nov 22 09:48:34 crc kubenswrapper[4693]: E1122 09:48:34.216096 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="extract-utilities" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216104 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="extract-utilities" Nov 22 09:48:34 crc kubenswrapper[4693]: E1122 09:48:34.216120 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5" containerName="tempest-tests-tempest-tests-runner" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216127 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5" containerName="tempest-tests-tempest-tests-runner" Nov 22 09:48:34 crc kubenswrapper[4693]: E1122 09:48:34.216173 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="extract-content" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216181 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="extract-content" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216940 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="fbb44ad6-cb19-4435-a4cf-dca69cf66fc7" containerName="registry-server" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.216995 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="fe7aa61b-ddcb-48b3-9d95-c203790e13e5" containerName="tempest-tests-tempest-tests-runner" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.217947 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.218066 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.220732 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"default-dockercfg-96k5n" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.361678 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.362191 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x4hpl\" (UniqueName: \"kubernetes.io/projected/1eef7711-f623-4d45-bf25-449b7cbc4e53-kube-api-access-x4hpl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.465103 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.465422 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x4hpl\" (UniqueName: \"kubernetes.io/projected/1eef7711-f623-4d45-bf25-449b7cbc4e53-kube-api-access-x4hpl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.465596 4693 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.483197 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x4hpl\" (UniqueName: \"kubernetes.io/projected/1eef7711-f623-4d45-bf25-449b7cbc4e53-kube-api-access-x4hpl\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.487440 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"test-operator-logs-pod-tempest-tempest-tests-tempest\" (UID: \"1eef7711-f623-4d45-bf25-449b7cbc4e53\") " pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.540083 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.769778 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.770029 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.805106 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.845204 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:34 crc kubenswrapper[4693]: I1122 09:48:34.937815 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/test-operator-logs-pod-tempest-tempest-tests-tempest"] Nov 22 09:48:35 crc kubenswrapper[4693]: I1122 09:48:35.040599 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:35 crc kubenswrapper[4693]: I1122 09:48:35.821839 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"1eef7711-f623-4d45-bf25-449b7cbc4e53","Type":"ContainerStarted","Data":"f29e8e9dd51f57847e48279f61d7349e2b19b989165e12e2c35acf6f45693e65"} Nov 22 09:48:36 crc kubenswrapper[4693]: I1122 09:48:36.833793 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" event={"ID":"1eef7711-f623-4d45-bf25-449b7cbc4e53","Type":"ContainerStarted","Data":"d50c69423e1ffb0f970a01dd5434f87145ce3eb1781afb54fe515ad6d8fc54ae"} Nov 22 09:48:36 crc kubenswrapper[4693]: I1122 09:48:36.834802 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-z4kwx" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="registry-server" containerID="cri-o://392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e" gracePeriod=2 Nov 22 09:48:36 crc kubenswrapper[4693]: I1122 09:48:36.853816 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/test-operator-logs-pod-tempest-tempest-tests-tempest" podStartSLOduration=2.029182263 podStartE2EDuration="2.853796412s" podCreationTimestamp="2025-11-22 09:48:34 +0000 UTC" firstStartedPulling="2025-11-22 09:48:34.942797596 +0000 UTC m=+2711.085299887" lastFinishedPulling="2025-11-22 09:48:35.767411746 +0000 UTC m=+2711.909914036" observedRunningTime="2025-11-22 09:48:36.85058368 +0000 UTC m=+2712.993085971" watchObservedRunningTime="2025-11-22 09:48:36.853796412 +0000 UTC m=+2712.996298704" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.240903 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.423216 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpst2\" (UniqueName: \"kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2\") pod \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.423260 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content\") pod \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.423343 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities\") pod \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\" (UID: \"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2\") " Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.423971 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities" (OuterVolumeSpecName: "utilities") pod "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" (UID: "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.424157 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.429551 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2" (OuterVolumeSpecName: "kube-api-access-mpst2") pod "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" (UID: "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2"). InnerVolumeSpecName "kube-api-access-mpst2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.438255 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" (UID: "561ebc2c-3509-4ce3-b6a1-8cbb664c19f2"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.530744 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpst2\" (UniqueName: \"kubernetes.io/projected/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-kube-api-access-mpst2\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.530773 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.844991 4693 generic.go:334] "Generic (PLEG): container finished" podID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerID="392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e" exitCode=0 Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.845058 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerDied","Data":"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e"} Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.845365 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-z4kwx" event={"ID":"561ebc2c-3509-4ce3-b6a1-8cbb664c19f2","Type":"ContainerDied","Data":"5eb90202fb9987b471c544b86a701d815b69a19da29c32ecc425755158e50d45"} Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.845084 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-z4kwx" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.845406 4693 scope.go:117] "RemoveContainer" containerID="392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.868797 4693 scope.go:117] "RemoveContainer" containerID="417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.874874 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.880872 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-z4kwx"] Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.900873 4693 scope.go:117] "RemoveContainer" containerID="ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.920145 4693 scope.go:117] "RemoveContainer" containerID="392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e" Nov 22 09:48:37 crc kubenswrapper[4693]: E1122 09:48:37.920438 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e\": container with ID starting with 392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e not found: ID does not exist" containerID="392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.920477 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e"} err="failed to get container status 
\"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e\": rpc error: code = NotFound desc = could not find container \"392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e\": container with ID starting with 392c79e66ff7fab66b80a7578942e7c4b16d4307457c826d33bc4947965a480e not found: ID does not exist" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.920504 4693 scope.go:117] "RemoveContainer" containerID="417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03" Nov 22 09:48:37 crc kubenswrapper[4693]: E1122 09:48:37.920906 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03\": container with ID starting with 417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03 not found: ID does not exist" containerID="417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.920932 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03"} err="failed to get container status \"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03\": rpc error: code = NotFound desc = could not find container \"417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03\": container with ID starting with 417a5ab62a72e7fbb99917623a2e504218fc7664cee5565e7d51da825455dd03 not found: ID does not exist" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.920948 4693 scope.go:117] "RemoveContainer" containerID="ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3" Nov 22 09:48:37 crc kubenswrapper[4693]: E1122 09:48:37.921266 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3\": container with ID starting with ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3 not found: ID does not exist" containerID="ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3" Nov 22 09:48:37 crc kubenswrapper[4693]: I1122 09:48:37.921294 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3"} err="failed to get container status \"ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3\": rpc error: code = NotFound desc = could not find container \"ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3\": container with ID starting with ca89c2e0ba726e064ef7fb1db0c13667825387fb4e6b4af034581f36a1ca97d3 not found: ID does not exist" Nov 22 09:48:38 crc kubenswrapper[4693]: I1122 09:48:38.164658 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" path="/var/lib/kubelet/pods/561ebc2c-3509-4ce3-b6a1-8cbb664c19f2/volumes" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.543995 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lnsf2/must-gather-9rtr5"] Nov 22 09:48:54 crc kubenswrapper[4693]: E1122 09:48:54.544934 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="extract-utilities" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.544952 4693 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="extract-utilities" Nov 22 09:48:54 crc kubenswrapper[4693]: E1122 09:48:54.544985 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="extract-content" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.544991 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="extract-content" Nov 22 09:48:54 crc kubenswrapper[4693]: E1122 09:48:54.545017 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="registry-server" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.545023 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="registry-server" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.545273 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="561ebc2c-3509-4ce3-b6a1-8cbb664c19f2" containerName="registry-server" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.546305 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.548718 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lnsf2"/"openshift-service-ca.crt" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.549333 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-lnsf2"/"kube-root-ca.crt" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.558332 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lnsf2/must-gather-9rtr5"] Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.672173 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.672484 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqfxr\" (UniqueName: \"kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.775488 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.775769 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqfxr\" (UniqueName: \"kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.775959 4693 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.799911 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqfxr\" (UniqueName: \"kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr\") pod \"must-gather-9rtr5\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:54 crc kubenswrapper[4693]: I1122 09:48:54.869061 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:48:55 crc kubenswrapper[4693]: I1122 09:48:55.331629 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-lnsf2/must-gather-9rtr5"] Nov 22 09:48:56 crc kubenswrapper[4693]: I1122 09:48:56.020054 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" event={"ID":"f86a081c-771d-447d-a212-8171f9360f84","Type":"ContainerStarted","Data":"e93aa239a2726479cf8617460ada15a91193381bf3cf115c7b36cb3d02f6c66b"} Nov 22 09:49:01 crc kubenswrapper[4693]: I1122 09:49:01.093803 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" event={"ID":"f86a081c-771d-447d-a212-8171f9360f84","Type":"ContainerStarted","Data":"be31e6c14553b6749e751b69a311097990755535e8e342a931f719fd3f8c1601"} Nov 22 09:49:02 crc kubenswrapper[4693]: I1122 09:49:02.105068 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" event={"ID":"f86a081c-771d-447d-a212-8171f9360f84","Type":"ContainerStarted","Data":"b8e8d1e0633e2237fc39f44938c6841522dab5720f5aad84d1dcf607ffff1282"} Nov 22 09:49:02 crc kubenswrapper[4693]: I1122 09:49:02.127540 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" podStartSLOduration=2.675521693 podStartE2EDuration="8.12752616s" podCreationTimestamp="2025-11-22 09:48:54 +0000 UTC" firstStartedPulling="2025-11-22 09:48:55.340870092 +0000 UTC m=+2731.483372383" lastFinishedPulling="2025-11-22 09:49:00.792874559 +0000 UTC m=+2736.935376850" observedRunningTime="2025-11-22 09:49:02.120590178 +0000 UTC m=+2738.263092468" watchObservedRunningTime="2025-11-22 09:49:02.12752616 +0000 UTC m=+2738.270028451" Nov 22 09:49:02 crc kubenswrapper[4693]: E1122 09:49:02.938729 4693 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.249:51406->192.168.25.249:44515: write tcp 192.168.25.249:51406->192.168.25.249:44515: write: broken pipe Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.092621 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-2z7b7"] Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.094392 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.096391 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-lnsf2"/"default-dockercfg-wq6d5" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.292330 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc56r\" (UniqueName: \"kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.292731 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.395248 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc56r\" (UniqueName: \"kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.395402 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.395534 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.426217 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc56r\" (UniqueName: \"kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r\") pod \"crc-debug-2z7b7\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: I1122 09:49:04.709924 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:04 crc kubenswrapper[4693]: W1122 09:49:04.740485 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd84ea63c_5788_48f8_9970_8ba1950c2a5f.slice/crio-e0749dfe2fe9b8c798de21e6aca293aef02ae0e75ebb1ff6e782f31285822f6a WatchSource:0}: Error finding container e0749dfe2fe9b8c798de21e6aca293aef02ae0e75ebb1ff6e782f31285822f6a: Status 404 returned error can't find the container with id e0749dfe2fe9b8c798de21e6aca293aef02ae0e75ebb1ff6e782f31285822f6a Nov 22 09:49:05 crc kubenswrapper[4693]: I1122 09:49:05.130954 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" event={"ID":"d84ea63c-5788-48f8-9970-8ba1950c2a5f","Type":"ContainerStarted","Data":"e0749dfe2fe9b8c798de21e6aca293aef02ae0e75ebb1ff6e782f31285822f6a"} Nov 22 09:49:16 crc kubenswrapper[4693]: I1122 09:49:16.266909 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" event={"ID":"d84ea63c-5788-48f8-9970-8ba1950c2a5f","Type":"ContainerStarted","Data":"ea7c768533b898e9241a7ab8a510da36453a9595b3906c69155420323e96868c"} Nov 22 09:49:16 crc kubenswrapper[4693]: I1122 09:49:16.288614 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" podStartSLOduration=1.386928097 podStartE2EDuration="12.288595275s" podCreationTimestamp="2025-11-22 09:49:04 +0000 UTC" firstStartedPulling="2025-11-22 09:49:04.742514075 +0000 UTC m=+2740.885016367" lastFinishedPulling="2025-11-22 09:49:15.644181254 +0000 UTC m=+2751.786683545" observedRunningTime="2025-11-22 09:49:16.284648183 +0000 UTC m=+2752.427150474" watchObservedRunningTime="2025-11-22 09:49:16.288595275 +0000 UTC m=+2752.431097566" Nov 22 09:49:49 crc kubenswrapper[4693]: I1122 09:49:49.544442 4693 generic.go:334] "Generic (PLEG): container finished" podID="d84ea63c-5788-48f8-9970-8ba1950c2a5f" containerID="ea7c768533b898e9241a7ab8a510da36453a9595b3906c69155420323e96868c" exitCode=0 Nov 22 09:49:49 crc kubenswrapper[4693]: I1122 09:49:49.544520 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" event={"ID":"d84ea63c-5788-48f8-9970-8ba1950c2a5f","Type":"ContainerDied","Data":"ea7c768533b898e9241a7ab8a510da36453a9595b3906c69155420323e96868c"} Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.636491 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.644275 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host\") pod \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.644354 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc56r\" (UniqueName: \"kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r\") pod \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\" (UID: \"d84ea63c-5788-48f8-9970-8ba1950c2a5f\") " Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.644388 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host" (OuterVolumeSpecName: "host") pod "d84ea63c-5788-48f8-9970-8ba1950c2a5f" (UID: "d84ea63c-5788-48f8-9970-8ba1950c2a5f"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.645317 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/d84ea63c-5788-48f8-9970-8ba1950c2a5f-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.654136 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r" (OuterVolumeSpecName: "kube-api-access-zc56r") pod "d84ea63c-5788-48f8-9970-8ba1950c2a5f" (UID: "d84ea63c-5788-48f8-9970-8ba1950c2a5f"). InnerVolumeSpecName "kube-api-access-zc56r". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.665518 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-2z7b7"] Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.670584 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-2z7b7"] Nov 22 09:49:50 crc kubenswrapper[4693]: I1122 09:49:50.746471 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc56r\" (UniqueName: \"kubernetes.io/projected/d84ea63c-5788-48f8-9970-8ba1950c2a5f-kube-api-access-zc56r\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.563790 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e0749dfe2fe9b8c798de21e6aca293aef02ae0e75ebb1ff6e782f31285822f6a" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.563876 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-2z7b7" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.817027 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-h7x8c"] Nov 22 09:49:51 crc kubenswrapper[4693]: E1122 09:49:51.817933 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d84ea63c-5788-48f8-9970-8ba1950c2a5f" containerName="container-00" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.817950 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="d84ea63c-5788-48f8-9970-8ba1950c2a5f" containerName="container-00" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.818264 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="d84ea63c-5788-48f8-9970-8ba1950c2a5f" containerName="container-00" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.819243 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.821284 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-lnsf2"/"default-dockercfg-wq6d5" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.975221 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:51 crc kubenswrapper[4693]: I1122 09:49:51.975441 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ndng\" (UniqueName: \"kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.077500 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ndng\" (UniqueName: \"kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.077822 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.078034 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.095429 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ndng\" (UniqueName: \"kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng\") pod \"crc-debug-h7x8c\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 
09:49:52.136549 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.158499 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d84ea63c-5788-48f8-9970-8ba1950c2a5f" path="/var/lib/kubelet/pods/d84ea63c-5788-48f8-9970-8ba1950c2a5f/volumes" Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.574620 4693 generic.go:334] "Generic (PLEG): container finished" podID="e1df1f1a-354d-409b-af57-0c2761716354" containerID="a6371d274798c459e83d0d71a3174f6500b149250879719a9c1bf63f3d7b16d5" exitCode=0 Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.574691 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" event={"ID":"e1df1f1a-354d-409b-af57-0c2761716354","Type":"ContainerDied","Data":"a6371d274798c459e83d0d71a3174f6500b149250879719a9c1bf63f3d7b16d5"} Nov 22 09:49:52 crc kubenswrapper[4693]: I1122 09:49:52.575061 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" event={"ID":"e1df1f1a-354d-409b-af57-0c2761716354","Type":"ContainerStarted","Data":"6d5c424dcfef98766419075e9a3904036a60cf77c779142820285812862b2af8"} Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.155913 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-h7x8c"] Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.164260 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-h7x8c"] Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.659092 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.814550 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ndng\" (UniqueName: \"kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng\") pod \"e1df1f1a-354d-409b-af57-0c2761716354\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.814614 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host\") pod \"e1df1f1a-354d-409b-af57-0c2761716354\" (UID: \"e1df1f1a-354d-409b-af57-0c2761716354\") " Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.814915 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host" (OuterVolumeSpecName: "host") pod "e1df1f1a-354d-409b-af57-0c2761716354" (UID: "e1df1f1a-354d-409b-af57-0c2761716354"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.819765 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng" (OuterVolumeSpecName: "kube-api-access-6ndng") pod "e1df1f1a-354d-409b-af57-0c2761716354" (UID: "e1df1f1a-354d-409b-af57-0c2761716354"). InnerVolumeSpecName "kube-api-access-6ndng". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.917432 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ndng\" (UniqueName: \"kubernetes.io/projected/e1df1f1a-354d-409b-af57-0c2761716354-kube-api-access-6ndng\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:53 crc kubenswrapper[4693]: I1122 09:49:53.917472 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/e1df1f1a-354d-409b-af57-0c2761716354-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.167535 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1df1f1a-354d-409b-af57-0c2761716354" path="/var/lib/kubelet/pods/e1df1f1a-354d-409b-af57-0c2761716354/volumes" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.321725 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-bgjgh"] Nov 22 09:49:54 crc kubenswrapper[4693]: E1122 09:49:54.322673 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1df1f1a-354d-409b-af57-0c2761716354" containerName="container-00" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.322701 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1df1f1a-354d-409b-af57-0c2761716354" containerName="container-00" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.323095 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1df1f1a-354d-409b-af57-0c2761716354" containerName="container-00" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.324477 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.424687 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sc2sq\" (UniqueName: \"kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.424735 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.526564 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sc2sq\" (UniqueName: \"kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.526616 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.526814 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: 
\"kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.543915 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sc2sq\" (UniqueName: \"kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq\") pod \"crc-debug-bgjgh\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.595371 4693 scope.go:117] "RemoveContainer" containerID="a6371d274798c459e83d0d71a3174f6500b149250879719a9c1bf63f3d7b16d5" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.595416 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-h7x8c" Nov 22 09:49:54 crc kubenswrapper[4693]: I1122 09:49:54.648974 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:54 crc kubenswrapper[4693]: W1122 09:49:54.674607 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod946d180a_7531_498f_8ba8_9d03e6abdba1.slice/crio-c17560bd150ce0e29ab25033e0e8ae09edae26361a3e927546297d2e0d622970 WatchSource:0}: Error finding container c17560bd150ce0e29ab25033e0e8ae09edae26361a3e927546297d2e0d622970: Status 404 returned error can't find the container with id c17560bd150ce0e29ab25033e0e8ae09edae26361a3e927546297d2e0d622970 Nov 22 09:49:55 crc kubenswrapper[4693]: I1122 09:49:55.607196 4693 generic.go:334] "Generic (PLEG): container finished" podID="946d180a-7531-498f-8ba8-9d03e6abdba1" containerID="37d7d72e9f85f8f9f055cbcde0989aca373510b3854ad104851c718dffb7c07d" exitCode=0 Nov 22 09:49:55 crc kubenswrapper[4693]: I1122 09:49:55.607275 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" event={"ID":"946d180a-7531-498f-8ba8-9d03e6abdba1","Type":"ContainerDied","Data":"37d7d72e9f85f8f9f055cbcde0989aca373510b3854ad104851c718dffb7c07d"} Nov 22 09:49:55 crc kubenswrapper[4693]: I1122 09:49:55.607559 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" event={"ID":"946d180a-7531-498f-8ba8-9d03e6abdba1","Type":"ContainerStarted","Data":"c17560bd150ce0e29ab25033e0e8ae09edae26361a3e927546297d2e0d622970"} Nov 22 09:49:55 crc kubenswrapper[4693]: I1122 09:49:55.645139 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-bgjgh"] Nov 22 09:49:55 crc kubenswrapper[4693]: I1122 09:49:55.652110 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lnsf2/crc-debug-bgjgh"] Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.691788 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.876797 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sc2sq\" (UniqueName: \"kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq\") pod \"946d180a-7531-498f-8ba8-9d03e6abdba1\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.876861 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host\") pod \"946d180a-7531-498f-8ba8-9d03e6abdba1\" (UID: \"946d180a-7531-498f-8ba8-9d03e6abdba1\") " Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.877012 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host" (OuterVolumeSpecName: "host") pod "946d180a-7531-498f-8ba8-9d03e6abdba1" (UID: "946d180a-7531-498f-8ba8-9d03e6abdba1"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.877727 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/946d180a-7531-498f-8ba8-9d03e6abdba1-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.882002 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq" (OuterVolumeSpecName: "kube-api-access-sc2sq") pod "946d180a-7531-498f-8ba8-9d03e6abdba1" (UID: "946d180a-7531-498f-8ba8-9d03e6abdba1"). InnerVolumeSpecName "kube-api-access-sc2sq". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:49:56 crc kubenswrapper[4693]: I1122 09:49:56.980711 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sc2sq\" (UniqueName: \"kubernetes.io/projected/946d180a-7531-498f-8ba8-9d03e6abdba1-kube-api-access-sc2sq\") on node \"crc\" DevicePath \"\"" Nov 22 09:49:57 crc kubenswrapper[4693]: I1122 09:49:57.622916 4693 scope.go:117] "RemoveContainer" containerID="37d7d72e9f85f8f9f055cbcde0989aca373510b3854ad104851c718dffb7c07d" Nov 22 09:49:57 crc kubenswrapper[4693]: I1122 09:49:57.622958 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/crc-debug-bgjgh" Nov 22 09:49:58 crc kubenswrapper[4693]: I1122 09:49:58.155431 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="946d180a-7531-498f-8ba8-9d03e6abdba1" path="/var/lib/kubelet/pods/946d180a-7531-498f-8ba8-9d03e6abdba1/volumes" Nov 22 09:50:00 crc kubenswrapper[4693]: I1122 09:50:00.246805 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:50:00 crc kubenswrapper[4693]: I1122 09:50:00.247234 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.176806 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6875cccfb6-frgx5_01894220-3ce0-4535-9d19-95f573987428/barbican-api/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.260107 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6875cccfb6-frgx5_01894220-3ce0-4535-9d19-95f573987428/barbican-api-log/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.338483 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-64777f45c8-bwpgz_2ea61845-fe45-4eed-a854-92545e309870/barbican-keystone-listener/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.403949 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-64777f45c8-bwpgz_2ea61845-fe45-4eed-a854-92545e309870/barbican-keystone-listener-log/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.444634 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f9b77d547-xf85h_1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40/barbican-worker/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.507952 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f9b77d547-xf85h_1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40/barbican-worker-log/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.629680 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2_3854e608-0001-4cad-bf75-68cfa7a1486f/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.696275 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/ceilometer-central-agent/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.772619 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/ceilometer-notification-agent/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.824286 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/proxy-httpd/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.851050 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/sg-core/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.969087 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ab074cff-55c1-4cc2-ac57-05c7948418c0/cinder-api/0.log" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.970826 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:08 crc kubenswrapper[4693]: E1122 09:50:08.971291 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="946d180a-7531-498f-8ba8-9d03e6abdba1" containerName="container-00" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.971312 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="946d180a-7531-498f-8ba8-9d03e6abdba1" containerName="container-00" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.972071 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="946d180a-7531-498f-8ba8-9d03e6abdba1" containerName="container-00" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.974787 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:08 crc kubenswrapper[4693]: I1122 09:50:08.986603 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.022468 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.022579 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.022639 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6xjnz\" (UniqueName: \"kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.039245 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ab074cff-55c1-4cc2-ac57-05c7948418c0/cinder-api-log/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.124810 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.124914 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6xjnz\" (UniqueName: \"kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz\") pod \"redhat-operators-jrbv8\" (UID: 
\"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.125366 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.125377 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.125751 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.141912 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6xjnz\" (UniqueName: \"kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz\") pod \"redhat-operators-jrbv8\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.145112 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5284f375-48e8-4caa-a5f9-fa762001ce69/cinder-scheduler/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.276960 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5284f375-48e8-4caa-a5f9-fa762001ce69/probe/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.291290 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8_a63528a4-99a1-4adc-8947-d914a74e4d8b/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.291561 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.578421 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7_7bd4f766-0c89-4aed-9f9a-15657c6e1efa/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.660231 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/init/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.748724 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.909774 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/init/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.948184 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/dnsmasq-dns/0.log" Nov 22 09:50:09 crc kubenswrapper[4693]: I1122 09:50:09.964636 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn_ac0f3797-4b0b-4b88-8624-95e289cf2386/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.176615 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_722dcbf6-ae9d-444d-9839-24b0b0e900db/glance-log/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.284178 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_722dcbf6-ae9d-444d-9839-24b0b0e900db/glance-httpd/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.401771 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2586c0ae-8b14-49b8-8787-d14da284b596/glance-httpd/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.467376 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2586c0ae-8b14-49b8-8787-d14da284b596/glance-log/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.639305 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7fd67558f8-nzmxr_dd3b9780-060c-4788-9800-20c1ac3b2e95/horizon/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.736710 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5_118d3e39-2ce7-4a88-b9c7-869e5e83a568/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.750054 4693 generic.go:334] "Generic (PLEG): container finished" podID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerID="7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16" exitCode=0 Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.750106 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerDied","Data":"7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16"} Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.750138 4693 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerStarted","Data":"afc31516ed775594f73aaa843c8477017326ab415b38dfa58db1e165d99f8716"} Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.812680 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7fd67558f8-nzmxr_dd3b9780-060c-4788-9800-20c1ac3b2e95/horizon-log/0.log" Nov 22 09:50:10 crc kubenswrapper[4693]: I1122 09:50:10.897411 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-lwtlp_2a20394f-6ab1-4de1-aa64-df5a655364bb/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.076347 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_ff38a410-4f38-40dc-afa6-2d2cca6054ca/kube-state-metrics/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.143552 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5947459fbf-s5xqj_5d2c816b-f30a-4a96-8f72-5f023d95e3be/keystone-api/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.288649 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m_537a26f3-39a2-48e4-af18-39d0e944c4b0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.530172 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7fbc84ccfc-8tdp6_bd35a927-864c-4718-891f-1a036a99ddfb/neutron-api/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.573247 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7fbc84ccfc-8tdp6_bd35a927-864c-4718-891f-1a036a99ddfb/neutron-httpd/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.737979 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt_ecdf1a42-8112-4bcc-b356-e364d56b93bb/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:11 crc kubenswrapper[4693]: I1122 09:50:11.766001 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerStarted","Data":"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d"} Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.141115 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_a4da75dc-7806-41ea-9fdd-d7ed1ca362d3/nova-cell0-conductor-conductor/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.162563 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5079844b-9574-4583-97b2-8232271e5681/nova-api-log/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.268900 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5079844b-9574-4583-97b2-8232271e5681/nova-api-api/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.495453 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_ca2d255f-4890-4fa9-85e7-47ab34607956/nova-cell1-conductor-conductor/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.501797 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-novncproxy-0_7135aae5-46b3-4654-aba4-70a6f2df0d1e/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.609689 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-vt57h_75221b44-0170-4231-b768-ad88de26addb/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:12 crc kubenswrapper[4693]: I1122 09:50:12.828516 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1569b6f7-3def-4eb6-87e7-5705b74b1fed/nova-metadata-log/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.037529 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/mysql-bootstrap/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.048497 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_d0427665-286b-4058-bec9-917d31c200e3/nova-scheduler-scheduler/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.298619 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/mysql-bootstrap/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.339676 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/galera/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.486233 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/mysql-bootstrap/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.520482 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1569b6f7-3def-4eb6-87e7-5705b74b1fed/nova-metadata-metadata/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.691325 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/mysql-bootstrap/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.718857 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d/openstackclient/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.739606 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/galera/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.779964 4693 generic.go:334] "Generic (PLEG): container finished" podID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerID="56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d" exitCode=0 Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.780007 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerDied","Data":"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d"} Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.931797 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-czsjw_6ebf2de1-2769-47bc-8136-4ff7460b89b1/ovn-controller/0.log" Nov 22 09:50:13 crc kubenswrapper[4693]: I1122 09:50:13.979554 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovn-controller-metrics-h8klh_9f9e4335-a9aa-4a2c-8300-25680a90ab8a/openstack-network-exporter/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.123258 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server-init/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.314340 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server-init/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.324198 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.342315 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovs-vswitchd/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.525577 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-gtdvj_e06049f5-cf35-4685-9cae-cf2c1cfa2dda/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.587468 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7728d59-f897-43e3-a7d6-7d1704f41739/openstack-network-exporter/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.669241 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7728d59-f897-43e3-a7d6-7d1704f41739/ovn-northd/0.log" Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.789267 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerStarted","Data":"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c"} Nov 22 09:50:14 crc kubenswrapper[4693]: I1122 09:50:14.810790 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jrbv8" podStartSLOduration=3.27128015 podStartE2EDuration="6.810771392s" podCreationTimestamp="2025-11-22 09:50:08 +0000 UTC" firstStartedPulling="2025-11-22 09:50:10.752264455 +0000 UTC m=+2806.894766746" lastFinishedPulling="2025-11-22 09:50:14.291755697 +0000 UTC m=+2810.434257988" observedRunningTime="2025-11-22 09:50:14.806134662 +0000 UTC m=+2810.948636954" watchObservedRunningTime="2025-11-22 09:50:14.810771392 +0000 UTC m=+2810.953273673" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.024674 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_645f7714-f41d-4ece-85ef-8043bc2ca51d/ovsdbserver-nb/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.071611 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_645f7714-f41d-4ece-85ef-8043bc2ca51d/openstack-network-exporter/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.161790 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_db7c316e-a7ee-4c1a-a663-b02279df3b3e/openstack-network-exporter/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.262583 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_db7c316e-a7ee-4c1a-a663-b02279df3b3e/ovsdbserver-sb/0.log" Nov 22 09:50:15 
crc kubenswrapper[4693]: I1122 09:50:15.447015 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-9f6899d66-t5sml_5b49963a-32e1-4500-969a-b7feaa78d4d3/placement-log/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.460821 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-9f6899d66-t5sml_5b49963a-32e1-4500-969a-b7feaa78d4d3/placement-api/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.638007 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/setup-container/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.884310 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/rabbitmq/0.log" Nov 22 09:50:15 crc kubenswrapper[4693]: I1122 09:50:15.942389 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/setup-container/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.107898 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/setup-container/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.495092 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/setup-container/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.509469 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/rabbitmq/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.523378 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t_b32380c7-b430-47aa-8694-054df2442f2b/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.755583 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-kblb8_7dd57e37-dbd8-422b-9ce9-ba054526ddd3/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:16 crc kubenswrapper[4693]: I1122 09:50:16.896407 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r_7529ff10-b67c-4bd3-aa41-46de267c73f3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.158778 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-lxlhk_da9ee799-029a-485c-a5b6-bbdc64697c71/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.227934 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-fd2vc_12096b4e-7f75-4fcb-be29-331232a1e64b/ssh-known-hosts-edpm-deployment/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.422451 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-795d6c4bbf-rtw6h_51c40064-56a3-4186-bf60-6181686b256d/proxy-server/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.486931 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-795d6c4bbf-rtw6h_51c40064-56a3-4186-bf60-6181686b256d/proxy-httpd/0.log" Nov 22 09:50:17 crc 
kubenswrapper[4693]: I1122 09:50:17.568309 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-p9zqf_23436668-ec00-4623-b854-aec77bf25158/swift-ring-rebalance/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.694384 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-auditor/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.735050 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-reaper/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.819608 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-replicator/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.848909 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-server/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.961956 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-auditor/0.log" Nov 22 09:50:17 crc kubenswrapper[4693]: I1122 09:50:17.984004 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-replicator/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.098034 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-server/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.140576 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-auditor/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.153369 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-updater/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.159805 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-expirer/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.322195 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-replicator/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.376337 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-server/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.388824 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/rsync/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.420736 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-updater/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.567640 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/swift-recon-cron/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.596007 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz_021fbba7-764a-4284-a4f1-1b8db668d9fd/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.762069 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_tempest-tests-tempest_fe7aa61b-ddcb-48b3-9d95-c203790e13e5/tempest-tests-tempest-tests-runner/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.860520 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_1eef7711-f623-4d45-bf25-449b7cbc4e53/test-operator-logs-container/0.log" Nov 22 09:50:18 crc kubenswrapper[4693]: I1122 09:50:18.981945 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-dr42c_a19ea96b-8910-41e1-a8c6-901206473d72/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:50:19 crc kubenswrapper[4693]: I1122 09:50:19.291724 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:19 crc kubenswrapper[4693]: I1122 09:50:19.291765 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:20 crc kubenswrapper[4693]: I1122 09:50:20.330001 4693 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-jrbv8" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="registry-server" probeResult="failure" output=< Nov 22 09:50:20 crc kubenswrapper[4693]: timeout: failed to connect service ":50051" within 1s Nov 22 09:50:20 crc kubenswrapper[4693]: > Nov 22 09:50:25 crc kubenswrapper[4693]: I1122 09:50:25.572820 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_cbaee87f-e8c6-4e56-9b59-c0f50054c172/memcached/0.log" Nov 22 09:50:29 crc kubenswrapper[4693]: I1122 09:50:29.337072 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:29 crc kubenswrapper[4693]: I1122 09:50:29.389790 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:30 crc kubenswrapper[4693]: I1122 09:50:30.246834 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:50:30 crc kubenswrapper[4693]: I1122 09:50:30.247387 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:50:30 crc kubenswrapper[4693]: I1122 09:50:30.362357 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:30 crc kubenswrapper[4693]: I1122 09:50:30.920809 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jrbv8" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="registry-server" 
containerID="cri-o://5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c" gracePeriod=2 Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.346055 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.461356 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content\") pod \"edbd37fc-c87e-468d-aa17-160f5f37707f\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.461517 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities\") pod \"edbd37fc-c87e-468d-aa17-160f5f37707f\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.461553 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6xjnz\" (UniqueName: \"kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz\") pod \"edbd37fc-c87e-468d-aa17-160f5f37707f\" (UID: \"edbd37fc-c87e-468d-aa17-160f5f37707f\") " Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.462165 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities" (OuterVolumeSpecName: "utilities") pod "edbd37fc-c87e-468d-aa17-160f5f37707f" (UID: "edbd37fc-c87e-468d-aa17-160f5f37707f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.467504 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz" (OuterVolumeSpecName: "kube-api-access-6xjnz") pod "edbd37fc-c87e-468d-aa17-160f5f37707f" (UID: "edbd37fc-c87e-468d-aa17-160f5f37707f"). InnerVolumeSpecName "kube-api-access-6xjnz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.536521 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "edbd37fc-c87e-468d-aa17-160f5f37707f" (UID: "edbd37fc-c87e-468d-aa17-160f5f37707f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.564099 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.564129 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/edbd37fc-c87e-468d-aa17-160f5f37707f-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.564141 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6xjnz\" (UniqueName: \"kubernetes.io/projected/edbd37fc-c87e-468d-aa17-160f5f37707f-kube-api-access-6xjnz\") on node \"crc\" DevicePath \"\"" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.933690 4693 generic.go:334] "Generic (PLEG): container finished" podID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerID="5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c" exitCode=0 Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.933795 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jrbv8" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.933805 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerDied","Data":"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c"} Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.934312 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jrbv8" event={"ID":"edbd37fc-c87e-468d-aa17-160f5f37707f","Type":"ContainerDied","Data":"afc31516ed775594f73aaa843c8477017326ab415b38dfa58db1e165d99f8716"} Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.934339 4693 scope.go:117] "RemoveContainer" containerID="5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.964142 4693 scope.go:117] "RemoveContainer" containerID="56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d" Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.968707 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.972531 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jrbv8"] Nov 22 09:50:31 crc kubenswrapper[4693]: I1122 09:50:31.999767 4693 scope.go:117] "RemoveContainer" containerID="7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.026663 4693 scope.go:117] "RemoveContainer" containerID="5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c" Nov 22 09:50:32 crc kubenswrapper[4693]: E1122 09:50:32.027112 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c\": container with ID starting with 5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c not found: ID does not exist" containerID="5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.027156 4693 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c"} err="failed to get container status \"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c\": rpc error: code = NotFound desc = could not find container \"5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c\": container with ID starting with 5cb203ff885adc1bfe614d43e0415fcad53fbea4a59cd2dbe023890863edfb1c not found: ID does not exist" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.027186 4693 scope.go:117] "RemoveContainer" containerID="56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d" Nov 22 09:50:32 crc kubenswrapper[4693]: E1122 09:50:32.027555 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d\": container with ID starting with 56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d not found: ID does not exist" containerID="56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.027583 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d"} err="failed to get container status \"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d\": rpc error: code = NotFound desc = could not find container \"56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d\": container with ID starting with 56a30809352e1a70cf9b4f9513234480be2a1ecbb19847c15364b038089d1c2d not found: ID does not exist" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.027603 4693 scope.go:117] "RemoveContainer" containerID="7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16" Nov 22 09:50:32 crc kubenswrapper[4693]: E1122 09:50:32.027958 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16\": container with ID starting with 7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16 not found: ID does not exist" containerID="7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.027980 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16"} err="failed to get container status \"7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16\": rpc error: code = NotFound desc = could not find container \"7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16\": container with ID starting with 7f632fa72f6c636e0c50841fbaf0c32d435a67a3470661c44627fd0beffa7a16 not found: ID does not exist" Nov 22 09:50:32 crc kubenswrapper[4693]: I1122 09:50:32.164129 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" path="/var/lib/kubelet/pods/edbd37fc-c87e-468d-aa17-160f5f37707f/volumes" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.366113 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: 
I1122 09:50:40.504794 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.522543 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.542936 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.658987 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.696676 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.708491 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/extract/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.847666 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7768f8c84f-c42f7_f1bb1578-9697-4968-b36f-b77d228fafaa/kube-rbac-proxy/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.863473 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7768f8c84f-c42f7_f1bb1578-9697-4968-b36f-b77d228fafaa/manager/0.log" Nov 22 09:50:40 crc kubenswrapper[4693]: I1122 09:50:40.893553 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6d8fd67bf7-lz798_cadb4705-8655-4f69-b00f-049e64a71b28/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.022422 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6d8fd67bf7-lz798_cadb4705-8655-4f69-b00f-049e64a71b28/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.030355 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-56dfb6b67f-r7t48_390da39a-b184-4348-9894-af8f4237aba8/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.076149 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-56dfb6b67f-r7t48_390da39a-b184-4348-9894-af8f4237aba8/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.169263 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8667fbf6f6-xh2cl_60c907e3-d225-414c-a15c-6f0a6999eb9d/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.262069 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8667fbf6f6-xh2cl_60c907e3-d225-414c-a15c-6f0a6999eb9d/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.373124 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-bf4c6585d-vts55_0d573b45-216b-4869-96f6-c460bb7ff10f/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.378548 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-bf4c6585d-vts55_0d573b45-216b-4869-96f6-c460bb7ff10f/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.422355 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d86b44686-j6jzp_e1cfaae9-e5b8-4826-9e34-6fce5657c237/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.546910 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d86b44686-j6jzp_e1cfaae9-e5b8-4826-9e34-6fce5657c237/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.582907 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-769d9c7585-9g6fk_4b168504-e6a6-48c2-a8af-dc6a44c77e59/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.711509 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-769d9c7585-9g6fk_4b168504-e6a6-48c2-a8af-dc6a44c77e59/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.719234 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5c75d7c94b-92vqr_23476d93-a604-4bc2-9e83-5c59e574436c/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.799889 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5c75d7c94b-92vqr_23476d93-a604-4bc2-9e83-5c59e574436c/manager/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.851920 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7879fb76fd-5xnb4_703d454c-2336-4941-8fe9-5b717f57423f/kube-rbac-proxy/0.log" Nov 22 09:50:41 crc kubenswrapper[4693]: I1122 09:50:41.976660 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7879fb76fd-5xnb4_703d454c-2336-4941-8fe9-5b717f57423f/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.056072 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7bb88cb858-gm5p8_9ea19f26-5477-43e9-84a2-1b8cf72f4f81/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.078863 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7bb88cb858-gm5p8_9ea19f26-5477-43e9-84a2-1b8cf72f4f81/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.206486 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6f8c5b86cb-t6hdc_9acf7acc-6712-4fbc-ab1c-14a9e1076ab8/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.237260 4693 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6f8c5b86cb-t6hdc_9acf7acc-6712-4fbc-ab1c-14a9e1076ab8/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.324179 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-66b7d6f598-wd7tg_ed8b2ca0-7928-41f6-8e30-787058fa0808/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.416479 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-66b7d6f598-wd7tg_ed8b2ca0-7928-41f6-8e30-787058fa0808/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.541609 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-86d796d84d-k8mcb_750a5fd2-0554-4a9d-a16b-9e82cb56694f/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.545569 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-86d796d84d-k8mcb_750a5fd2-0554-4a9d-a16b-9e82cb56694f/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.608443 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6fdc856c5d-q68sw_6bad4eeb-497a-4459-af08-c6f1db9ee8bf/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.717057 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6fdc856c5d-q68sw_6bad4eeb-497a-4459-af08-c6f1db9ee8bf/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.749116 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4_a100268d-89c1-412b-82a6-843711bcb44b/kube-rbac-proxy/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.807283 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4_a100268d-89c1-412b-82a6-843711bcb44b/manager/0.log" Nov 22 09:50:42 crc kubenswrapper[4693]: I1122 09:50:42.887775 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6cb9dc54f8-qv89g_261611c7-97d3-444c-bba5-e06e1593a5e4/kube-rbac-proxy/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.126272 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-8486c7f98b-hntsn_13a826c8-8c21-452b-80d9-237f609a62a5/kube-rbac-proxy/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.290760 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-8486c7f98b-hntsn_13a826c8-8c21-452b-80d9-237f609a62a5/operator/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.390012 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-5zdsq_932a4450-e8f1-4d96-acf5-1249c1f7cb07/registry-server/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.499548 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5bdf4f7f7f-h6k74_bbbb8d7a-ee06-4f2e-9982-97b6ee86801d/kube-rbac-proxy/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 
09:50:43.565077 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5bdf4f7f7f-h6k74_bbbb8d7a-ee06-4f2e-9982-97b6ee86801d/manager/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.679447 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-6dc664666c-mqzzr_bbb5f842-47da-40fc-a082-50323f1f10f8/kube-rbac-proxy/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.700687 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-6dc664666c-mqzzr_bbb5f842-47da-40fc-a082-50323f1f10f8/manager/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.818729 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn_d7bbf03f-0d49-4144-8ea9-0303a2e5c86e/operator/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.923921 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-799cb6ffd6-vnrh2_89938523-c1a9-4f6e-aebb-396a3cd509c6/kube-rbac-proxy/0.log" Nov 22 09:50:43 crc kubenswrapper[4693]: I1122 09:50:43.971832 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6cb9dc54f8-qv89g_261611c7-97d3-444c-bba5-e06e1593a5e4/manager/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.077746 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-799cb6ffd6-vnrh2_89938523-c1a9-4f6e-aebb-396a3cd509c6/manager/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.096141 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-7798859c74-kz8xk_1b962230-a413-4f39-a8c8-bed04c898724/kube-rbac-proxy/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.144411 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-7798859c74-kz8xk_1b962230-a413-4f39-a8c8-bed04c898724/manager/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.214866 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-8464cf66df-cl5qf_3baa8fe0-513d-4a42-a83a-4cc6fbf0e938/kube-rbac-proxy/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.256612 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-8464cf66df-cl5qf_3baa8fe0-513d-4a42-a83a-4cc6fbf0e938/manager/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.316769 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7cd4fb6f79-298gp_1f579f14-5558-45aa-9fa5-da9ee0ccac02/kube-rbac-proxy/0.log" Nov 22 09:50:44 crc kubenswrapper[4693]: I1122 09:50:44.345809 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7cd4fb6f79-298gp_1f579f14-5558-45aa-9fa5-da9ee0ccac02/manager/0.log" Nov 22 09:50:57 crc kubenswrapper[4693]: I1122 09:50:57.521260 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-gqsrq_f6d86049-ce66-4900-bc42-b1ac6864e79a/control-plane-machine-set-operator/0.log" Nov 22 
09:50:57 crc kubenswrapper[4693]: I1122 09:50:57.661638 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-chjcb_56f55ada-5668-4a07-888e-1c578214f660/kube-rbac-proxy/0.log" Nov 22 09:50:57 crc kubenswrapper[4693]: I1122 09:50:57.673387 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-chjcb_56f55ada-5668-4a07-888e-1c578214f660/machine-api-operator/0.log" Nov 22 09:51:00 crc kubenswrapper[4693]: I1122 09:51:00.246588 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:51:00 crc kubenswrapper[4693]: I1122 09:51:00.247039 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:51:00 crc kubenswrapper[4693]: I1122 09:51:00.247099 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:51:00 crc kubenswrapper[4693]: I1122 09:51:00.248212 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:51:00 crc kubenswrapper[4693]: I1122 09:51:00.248273 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" gracePeriod=600 Nov 22 09:51:00 crc kubenswrapper[4693]: E1122 09:51:00.368991 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:51:01 crc kubenswrapper[4693]: I1122 09:51:01.206390 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" exitCode=0 Nov 22 09:51:01 crc kubenswrapper[4693]: I1122 09:51:01.206452 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba"} Nov 22 09:51:01 crc kubenswrapper[4693]: I1122 09:51:01.206909 4693 scope.go:117] "RemoveContainer" containerID="d080c0d727f9a8fa0a772106ee876e3fffb5d3da555e02db40176872295f0d5c" Nov 22 09:51:01 
crc kubenswrapper[4693]: I1122 09:51:01.207899 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:51:01 crc kubenswrapper[4693]: E1122 09:51:01.208181 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:51:07 crc kubenswrapper[4693]: I1122 09:51:07.937549 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tr5jb_58fe0427-fbdb-40ea-9b6e-80f09e215015/cert-manager-controller/0.log" Nov 22 09:51:08 crc kubenswrapper[4693]: I1122 09:51:08.100694 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-pb9xc_f5108d4d-c652-4cba-8492-281beb13ce46/cert-manager-cainjector/0.log" Nov 22 09:51:08 crc kubenswrapper[4693]: I1122 09:51:08.132436 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-w2r5d_3827b7dd-1354-484e-8430-daec9b09d589/cert-manager-webhook/0.log" Nov 22 09:51:13 crc kubenswrapper[4693]: I1122 09:51:13.147005 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:51:13 crc kubenswrapper[4693]: E1122 09:51:13.147919 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.263182 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-l4gbq_b1a9c594-5414-44e7-a2ae-e1bd9fca29a3/nmstate-console-plugin/0.log" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.433061 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-5vqxd_cebd96db-0d01-4c74-a89f-d07b10c6fab8/nmstate-handler/0.log" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.458592 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rkrtz_9c890510-4900-4b57-a97e-c15267309d74/kube-rbac-proxy/0.log" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.483933 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rkrtz_9c890510-4900-4b57-a97e-c15267309d74/nmstate-metrics/0.log" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.612799 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-htlwb_ff3958b0-cd30-4470-af03-214de7183eca/nmstate-operator/0.log" Nov 22 09:51:18 crc kubenswrapper[4693]: I1122 09:51:18.641103 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-w59dj_e808ce78-c21f-414a-84cc-0f9b6e6154aa/nmstate-webhook/0.log" Nov 22 09:51:28 crc kubenswrapper[4693]: I1122 09:51:28.148016 
4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:51:28 crc kubenswrapper[4693]: E1122 09:51:28.149214 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.348646 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-j5p4z_a3aed62f-5abf-4446-9c61-1618025ddc52/kube-rbac-proxy/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.499140 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-j5p4z_a3aed62f-5abf-4446-9c61-1618025ddc52/controller/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.547423 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.702542 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.720252 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.743257 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.786748 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.874636 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.874911 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.924789 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:51:30 crc kubenswrapper[4693]: I1122 09:51:30.928624 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.080972 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.099043 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.101733 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.105431 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/controller/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.236200 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/frr-metrics/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.249350 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/kube-rbac-proxy/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.267709 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/kube-rbac-proxy-frr/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.437952 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/reloader/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.474793 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-dlqvh_d3923021-6731-4289-a3ce-f78f990d6d61/frr-k8s-webhook-server/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.707959 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-688d9f6dd-24fdn_04bf9ebb-5541-4144-879a-1ac25382249d/manager/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.828295 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-df9cf6-744f8_d60d2419-520e-4156-9d05-3b174542f80e/webhook-server/0.log" Nov 22 09:51:31 crc kubenswrapper[4693]: I1122 09:51:31.928283 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5gdzr_a9bc621d-a044-4e19-99d8-297dd534f390/kube-rbac-proxy/0.log" Nov 22 09:51:32 crc kubenswrapper[4693]: I1122 09:51:32.442380 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5gdzr_a9bc621d-a044-4e19-99d8-297dd534f390/speaker/0.log" Nov 22 09:51:32 crc kubenswrapper[4693]: I1122 09:51:32.472656 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/frr/0.log" Nov 22 09:51:41 crc kubenswrapper[4693]: I1122 09:51:41.147467 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:51:41 crc kubenswrapper[4693]: E1122 09:51:41.149739 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.176453 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.323066 4693 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.327516 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.374716 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.475703 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.496277 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.536435 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/extract/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.627517 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.845836 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.893864 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:51:43 crc kubenswrapper[4693]: I1122 09:51:43.895762 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.019273 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.029820 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.213738 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.401653 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.419329 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.427434 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/registry-server/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.438819 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.625479 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.635673 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.832601 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:51:44 crc kubenswrapper[4693]: I1122 09:51:44.927558 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/registry-server/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.180652 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.233041 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.243291 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.380258 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.443996 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/extract/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.456184 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.580573 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-mrzgw_651abef9-77a9-4b60-9522-af17781c7a4b/marketplace-operator/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.627856 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.816699 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.832915 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.838529 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.974713 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:51:45 crc kubenswrapper[4693]: I1122 09:51:45.978181 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.084783 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/registry-server/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.169767 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.322795 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.340802 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.341723 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.548173 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.554081 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:51:46 crc kubenswrapper[4693]: I1122 09:51:46.879257 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/registry-server/0.log" Nov 22 09:51:52 crc kubenswrapper[4693]: I1122 09:51:52.146928 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:51:52 crc kubenswrapper[4693]: E1122 09:51:52.148115 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: 
\"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:52:03 crc kubenswrapper[4693]: I1122 09:52:03.147342 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:52:03 crc kubenswrapper[4693]: E1122 09:52:03.148066 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:52:16 crc kubenswrapper[4693]: I1122 09:52:16.148233 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:52:16 crc kubenswrapper[4693]: E1122 09:52:16.149400 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:52:31 crc kubenswrapper[4693]: I1122 09:52:31.146762 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:52:31 crc kubenswrapper[4693]: E1122 09:52:31.147745 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:52:45 crc kubenswrapper[4693]: I1122 09:52:45.147166 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:52:45 crc kubenswrapper[4693]: E1122 09:52:45.148147 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:52:56 crc kubenswrapper[4693]: I1122 09:52:56.146439 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:52:56 crc kubenswrapper[4693]: E1122 09:52:56.148765 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:53:09 crc kubenswrapper[4693]: I1122 09:53:09.147558 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:53:09 crc kubenswrapper[4693]: E1122 09:53:09.148711 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:53:10 crc kubenswrapper[4693]: I1122 09:53:10.349605 4693 generic.go:334] "Generic (PLEG): container finished" podID="f86a081c-771d-447d-a212-8171f9360f84" containerID="be31e6c14553b6749e751b69a311097990755535e8e342a931f719fd3f8c1601" exitCode=0 Nov 22 09:53:10 crc kubenswrapper[4693]: I1122 09:53:10.349686 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" event={"ID":"f86a081c-771d-447d-a212-8171f9360f84","Type":"ContainerDied","Data":"be31e6c14553b6749e751b69a311097990755535e8e342a931f719fd3f8c1601"} Nov 22 09:53:10 crc kubenswrapper[4693]: I1122 09:53:10.351083 4693 scope.go:117] "RemoveContainer" containerID="be31e6c14553b6749e751b69a311097990755535e8e342a931f719fd3f8c1601" Nov 22 09:53:11 crc kubenswrapper[4693]: I1122 09:53:11.259387 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lnsf2_must-gather-9rtr5_f86a081c-771d-447d-a212-8171f9360f84/gather/0.log" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.073644 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-lnsf2/must-gather-9rtr5"] Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.075317 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="copy" containerID="cri-o://b8e8d1e0633e2237fc39f44938c6841522dab5720f5aad84d1dcf607ffff1282" gracePeriod=2 Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.092813 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-lnsf2/must-gather-9rtr5"] Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.431144 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lnsf2_must-gather-9rtr5_f86a081c-771d-447d-a212-8171f9360f84/copy/0.log" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.432241 4693 generic.go:334] "Generic (PLEG): container finished" podID="f86a081c-771d-447d-a212-8171f9360f84" containerID="b8e8d1e0633e2237fc39f44938c6841522dab5720f5aad84d1dcf607ffff1282" exitCode=143 Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.432294 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e93aa239a2726479cf8617460ada15a91193381bf3cf115c7b36cb3d02f6c66b" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.483526 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-lnsf2_must-gather-9rtr5_f86a081c-771d-447d-a212-8171f9360f84/copy/0.log" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.484004 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.613983 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output\") pod \"f86a081c-771d-447d-a212-8171f9360f84\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.614221 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqfxr\" (UniqueName: \"kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr\") pod \"f86a081c-771d-447d-a212-8171f9360f84\" (UID: \"f86a081c-771d-447d-a212-8171f9360f84\") " Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.620447 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr" (OuterVolumeSpecName: "kube-api-access-tqfxr") pod "f86a081c-771d-447d-a212-8171f9360f84" (UID: "f86a081c-771d-447d-a212-8171f9360f84"). InnerVolumeSpecName "kube-api-access-tqfxr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.716806 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqfxr\" (UniqueName: \"kubernetes.io/projected/f86a081c-771d-447d-a212-8171f9360f84-kube-api-access-tqfxr\") on node \"crc\" DevicePath \"\"" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.724586 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "f86a081c-771d-447d-a212-8171f9360f84" (UID: "f86a081c-771d-447d-a212-8171f9360f84"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:53:19 crc kubenswrapper[4693]: I1122 09:53:19.819360 4693 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/f86a081c-771d-447d-a212-8171f9360f84-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 09:53:20 crc kubenswrapper[4693]: I1122 09:53:20.155450 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f86a081c-771d-447d-a212-8171f9360f84" path="/var/lib/kubelet/pods/f86a081c-771d-447d-a212-8171f9360f84/volumes" Nov 22 09:53:20 crc kubenswrapper[4693]: I1122 09:53:20.441920 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-lnsf2/must-gather-9rtr5" Nov 22 09:53:24 crc kubenswrapper[4693]: I1122 09:53:24.152900 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:53:24 crc kubenswrapper[4693]: E1122 09:53:24.154016 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:53:35 crc kubenswrapper[4693]: I1122 09:53:35.147243 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:53:35 crc kubenswrapper[4693]: E1122 09:53:35.148561 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:53:49 crc kubenswrapper[4693]: I1122 09:53:49.147135 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:53:49 crc kubenswrapper[4693]: E1122 09:53:49.148272 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:54:00 crc kubenswrapper[4693]: I1122 09:54:00.146998 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:54:00 crc kubenswrapper[4693]: E1122 09:54:00.147789 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:54:15 crc kubenswrapper[4693]: I1122 09:54:15.147252 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:54:15 crc kubenswrapper[4693]: E1122 09:54:15.148151 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:54:28 crc kubenswrapper[4693]: I1122 09:54:28.147457 4693 scope.go:117] "RemoveContainer" 
containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:54:28 crc kubenswrapper[4693]: E1122 09:54:28.148423 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:54:39 crc kubenswrapper[4693]: I1122 09:54:39.146652 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:54:39 crc kubenswrapper[4693]: E1122 09:54:39.148681 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:54:53 crc kubenswrapper[4693]: I1122 09:54:53.147224 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:54:53 crc kubenswrapper[4693]: E1122 09:54:53.148113 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:55:06 crc kubenswrapper[4693]: I1122 09:55:06.147347 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:55:06 crc kubenswrapper[4693]: E1122 09:55:06.149367 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.857401 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qffbp/must-gather-kzwdq"] Nov 22 09:55:10 crc kubenswrapper[4693]: E1122 09:55:10.858308 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="extract-content" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858325 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="extract-content" Nov 22 09:55:10 crc kubenswrapper[4693]: E1122 09:55:10.858355 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="gather" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858361 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="gather" Nov 22 09:55:10 crc 
kubenswrapper[4693]: E1122 09:55:10.858375 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="extract-utilities" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858381 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="extract-utilities" Nov 22 09:55:10 crc kubenswrapper[4693]: E1122 09:55:10.858398 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="copy" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858403 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="copy" Nov 22 09:55:10 crc kubenswrapper[4693]: E1122 09:55:10.858411 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="registry-server" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858417 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="registry-server" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858599 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="gather" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858616 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f86a081c-771d-447d-a212-8171f9360f84" containerName="copy" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.858624 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="edbd37fc-c87e-468d-aa17-160f5f37707f" containerName="registry-server" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.859539 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.873634 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-qffbp"/"default-dockercfg-ddx7t" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.874402 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qffbp"/"kube-root-ca.crt" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.891456 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-qffbp"/"openshift-service-ca.crt" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.896677 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.897100 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fgpkt\" (UniqueName: \"kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.909875 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qffbp/must-gather-kzwdq"] Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.999333 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fgpkt\" (UniqueName: \"kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:10 crc kubenswrapper[4693]: I1122 09:55:10.999494 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:11 crc kubenswrapper[4693]: I1122 09:55:11.000083 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:11 crc kubenswrapper[4693]: I1122 09:55:11.039668 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fgpkt\" (UniqueName: \"kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt\") pod \"must-gather-kzwdq\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:11 crc kubenswrapper[4693]: I1122 09:55:11.177066 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:55:11 crc kubenswrapper[4693]: I1122 09:55:11.600881 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-qffbp/must-gather-kzwdq"] Nov 22 09:55:12 crc kubenswrapper[4693]: I1122 09:55:12.428195 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/must-gather-kzwdq" event={"ID":"ccb41945-1c9c-4ee6-a791-e16fdfb849f7","Type":"ContainerStarted","Data":"e7c8bfe1311b37ab89739074062ce069346e1fadee52529c6d6bc71f5c7014c1"} Nov 22 09:55:12 crc kubenswrapper[4693]: I1122 09:55:12.428648 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/must-gather-kzwdq" event={"ID":"ccb41945-1c9c-4ee6-a791-e16fdfb849f7","Type":"ContainerStarted","Data":"b585132fd1a566db3b2a032e44254f78b5035bdc9f6910330d0af6e14df00016"} Nov 22 09:55:12 crc kubenswrapper[4693]: I1122 09:55:12.428664 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/must-gather-kzwdq" event={"ID":"ccb41945-1c9c-4ee6-a791-e16fdfb849f7","Type":"ContainerStarted","Data":"26f530e47b1e5a3785ba6b7c000c20dae7c11df6958dc0f8f3d83b0704626787"} Nov 22 09:55:12 crc kubenswrapper[4693]: I1122 09:55:12.444643 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qffbp/must-gather-kzwdq" podStartSLOduration=2.444627271 podStartE2EDuration="2.444627271s" podCreationTimestamp="2025-11-22 09:55:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:55:12.441346353 +0000 UTC m=+3108.583848643" watchObservedRunningTime="2025-11-22 09:55:12.444627271 +0000 UTC m=+3108.587129563" Nov 22 09:55:14 crc kubenswrapper[4693]: I1122 09:55:14.785350 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qffbp/crc-debug-hvm6d"] Nov 22 09:55:14 crc kubenswrapper[4693]: I1122 09:55:14.786826 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:14 crc kubenswrapper[4693]: I1122 09:55:14.979741 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:14 crc kubenswrapper[4693]: I1122 09:55:14.980228 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hwp42\" (UniqueName: \"kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.082544 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hwp42\" (UniqueName: \"kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.082874 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.083056 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.106318 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hwp42\" (UniqueName: \"kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42\") pod \"crc-debug-hvm6d\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.406247 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:15 crc kubenswrapper[4693]: W1122 09:55:15.440247 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddfa43355_84c8_493d_a17e_eb2e510c37f4.slice/crio-add81e97ca590bfbd294b8a05abe795fa6866f9bf07eb8cc494cd1857e8959da WatchSource:0}: Error finding container add81e97ca590bfbd294b8a05abe795fa6866f9bf07eb8cc494cd1857e8959da: Status 404 returned error can't find the container with id add81e97ca590bfbd294b8a05abe795fa6866f9bf07eb8cc494cd1857e8959da Nov 22 09:55:15 crc kubenswrapper[4693]: I1122 09:55:15.451653 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" event={"ID":"dfa43355-84c8-493d-a17e-eb2e510c37f4","Type":"ContainerStarted","Data":"add81e97ca590bfbd294b8a05abe795fa6866f9bf07eb8cc494cd1857e8959da"} Nov 22 09:55:16 crc kubenswrapper[4693]: I1122 09:55:16.461685 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" event={"ID":"dfa43355-84c8-493d-a17e-eb2e510c37f4","Type":"ContainerStarted","Data":"8ad1cd3b33be8118b1f35566080ec2f6dd70b9c2995d94a73bde5a6b32f8ad13"} Nov 22 09:55:16 crc kubenswrapper[4693]: I1122 09:55:16.481353 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" podStartSLOduration=2.481335097 podStartE2EDuration="2.481335097s" podCreationTimestamp="2025-11-22 09:55:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 09:55:16.473491809 +0000 UTC m=+3112.615994100" watchObservedRunningTime="2025-11-22 09:55:16.481335097 +0000 UTC m=+3112.623837388" Nov 22 09:55:18 crc kubenswrapper[4693]: I1122 09:55:18.147000 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:55:18 crc kubenswrapper[4693]: E1122 09:55:18.147629 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:55:29 crc kubenswrapper[4693]: I1122 09:55:29.147815 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:55:29 crc kubenswrapper[4693]: E1122 09:55:29.148801 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:55:33 crc kubenswrapper[4693]: I1122 09:55:33.539649 4693 scope.go:117] "RemoveContainer" containerID="b8e8d1e0633e2237fc39f44938c6841522dab5720f5aad84d1dcf607ffff1282" Nov 22 09:55:33 crc kubenswrapper[4693]: I1122 09:55:33.570115 4693 scope.go:117] "RemoveContainer" containerID="be31e6c14553b6749e751b69a311097990755535e8e342a931f719fd3f8c1601" Nov 22 09:55:33 crc 
kubenswrapper[4693]: I1122 09:55:33.632884 4693 scope.go:117] "RemoveContainer" containerID="ea7c768533b898e9241a7ab8a510da36453a9595b3906c69155420323e96868c" Nov 22 09:55:41 crc kubenswrapper[4693]: I1122 09:55:41.682592 4693 generic.go:334] "Generic (PLEG): container finished" podID="dfa43355-84c8-493d-a17e-eb2e510c37f4" containerID="8ad1cd3b33be8118b1f35566080ec2f6dd70b9c2995d94a73bde5a6b32f8ad13" exitCode=0 Nov 22 09:55:41 crc kubenswrapper[4693]: I1122 09:55:41.682675 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" event={"ID":"dfa43355-84c8-493d-a17e-eb2e510c37f4","Type":"ContainerDied","Data":"8ad1cd3b33be8118b1f35566080ec2f6dd70b9c2995d94a73bde5a6b32f8ad13"} Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.768324 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.792975 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-hvm6d"] Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.798281 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-hvm6d"] Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.871323 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host\") pod \"dfa43355-84c8-493d-a17e-eb2e510c37f4\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.871422 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hwp42\" (UniqueName: \"kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42\") pod \"dfa43355-84c8-493d-a17e-eb2e510c37f4\" (UID: \"dfa43355-84c8-493d-a17e-eb2e510c37f4\") " Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.871423 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host" (OuterVolumeSpecName: "host") pod "dfa43355-84c8-493d-a17e-eb2e510c37f4" (UID: "dfa43355-84c8-493d-a17e-eb2e510c37f4"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.872187 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/dfa43355-84c8-493d-a17e-eb2e510c37f4-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.877112 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42" (OuterVolumeSpecName: "kube-api-access-hwp42") pod "dfa43355-84c8-493d-a17e-eb2e510c37f4" (UID: "dfa43355-84c8-493d-a17e-eb2e510c37f4"). InnerVolumeSpecName "kube-api-access-hwp42". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:55:42 crc kubenswrapper[4693]: I1122 09:55:42.974085 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hwp42\" (UniqueName: \"kubernetes.io/projected/dfa43355-84c8-493d-a17e-eb2e510c37f4-kube-api-access-hwp42\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:43 crc kubenswrapper[4693]: I1122 09:55:43.708350 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="add81e97ca590bfbd294b8a05abe795fa6866f9bf07eb8cc494cd1857e8959da" Nov 22 09:55:43 crc kubenswrapper[4693]: I1122 09:55:43.708436 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-hvm6d" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.013570 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qffbp/crc-debug-qchr5"] Nov 22 09:55:44 crc kubenswrapper[4693]: E1122 09:55:44.014808 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfa43355-84c8-493d-a17e-eb2e510c37f4" containerName="container-00" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.014912 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfa43355-84c8-493d-a17e-eb2e510c37f4" containerName="container-00" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.015179 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfa43355-84c8-493d-a17e-eb2e510c37f4" containerName="container-00" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.015882 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.109432 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n22lc\" (UniqueName: \"kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.109597 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.155399 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:55:44 crc kubenswrapper[4693]: E1122 09:55:44.155810 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.159716 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfa43355-84c8-493d-a17e-eb2e510c37f4" path="/var/lib/kubelet/pods/dfa43355-84c8-493d-a17e-eb2e510c37f4/volumes" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.211701 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"kube-api-access-n22lc\" (UniqueName: \"kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.211901 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.212178 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.231887 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n22lc\" (UniqueName: \"kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc\") pod \"crc-debug-qchr5\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.331573 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.717199 4693 generic.go:334] "Generic (PLEG): container finished" podID="87ff31f2-7d38-4f35-834b-a0c7ae5b474e" containerID="426654f6569d79d03db8e6215b63dc2cbbf7066e034f1f927b9f3a04275820c4" exitCode=0 Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.717248 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-qchr5" event={"ID":"87ff31f2-7d38-4f35-834b-a0c7ae5b474e","Type":"ContainerDied","Data":"426654f6569d79d03db8e6215b63dc2cbbf7066e034f1f927b9f3a04275820c4"} Nov 22 09:55:44 crc kubenswrapper[4693]: I1122 09:55:44.717284 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-qchr5" event={"ID":"87ff31f2-7d38-4f35-834b-a0c7ae5b474e","Type":"ContainerStarted","Data":"0bc864eea11ac87c3e40cccc083243ffc3816aef7fff1d4a8ace9af99824e05f"} Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.124034 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-qchr5"] Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.131892 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-qchr5"] Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.812065 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.948427 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host\") pod \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.948551 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n22lc\" (UniqueName: \"kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc\") pod \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\" (UID: \"87ff31f2-7d38-4f35-834b-a0c7ae5b474e\") " Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.948668 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host" (OuterVolumeSpecName: "host") pod "87ff31f2-7d38-4f35-834b-a0c7ae5b474e" (UID: "87ff31f2-7d38-4f35-834b-a0c7ae5b474e"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.949581 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:45 crc kubenswrapper[4693]: I1122 09:55:45.955217 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc" (OuterVolumeSpecName: "kube-api-access-n22lc") pod "87ff31f2-7d38-4f35-834b-a0c7ae5b474e" (UID: "87ff31f2-7d38-4f35-834b-a0c7ae5b474e"). InnerVolumeSpecName "kube-api-access-n22lc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.051589 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n22lc\" (UniqueName: \"kubernetes.io/projected/87ff31f2-7d38-4f35-834b-a0c7ae5b474e-kube-api-access-n22lc\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.157009 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87ff31f2-7d38-4f35-834b-a0c7ae5b474e" path="/var/lib/kubelet/pods/87ff31f2-7d38-4f35-834b-a0c7ae5b474e/volumes" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.307365 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-qffbp/crc-debug-lrtb4"] Nov 22 09:55:46 crc kubenswrapper[4693]: E1122 09:55:46.308136 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ff31f2-7d38-4f35-834b-a0c7ae5b474e" containerName="container-00" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.308185 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ff31f2-7d38-4f35-834b-a0c7ae5b474e" containerName="container-00" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.308724 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="87ff31f2-7d38-4f35-834b-a0c7ae5b474e" containerName="container-00" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.309591 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.358071 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.358392 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x989d\" (UniqueName: \"kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.460020 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x989d\" (UniqueName: \"kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.460139 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.460231 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.488402 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x989d\" (UniqueName: \"kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d\") pod \"crc-debug-lrtb4\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.631426 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:46 crc kubenswrapper[4693]: W1122 09:55:46.659966 4693 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5a871b06_5ad1_47bf_bdbc_a484ecf8b4af.slice/crio-37a75c6c7c124a842f2fa6fad241ec8f14bbed65637a369be5d655978823672e WatchSource:0}: Error finding container 37a75c6c7c124a842f2fa6fad241ec8f14bbed65637a369be5d655978823672e: Status 404 returned error can't find the container with id 37a75c6c7c124a842f2fa6fad241ec8f14bbed65637a369be5d655978823672e Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.736162 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" event={"ID":"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af","Type":"ContainerStarted","Data":"37a75c6c7c124a842f2fa6fad241ec8f14bbed65637a369be5d655978823672e"} Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.738067 4693 scope.go:117] "RemoveContainer" containerID="426654f6569d79d03db8e6215b63dc2cbbf7066e034f1f927b9f3a04275820c4" Nov 22 09:55:46 crc kubenswrapper[4693]: I1122 09:55:46.738159 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-qchr5" Nov 22 09:55:47 crc kubenswrapper[4693]: I1122 09:55:47.748998 4693 generic.go:334] "Generic (PLEG): container finished" podID="5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" containerID="dca204a8415045f410b68e0eff5fbb141d2e38be69e030c9df25dea4272425ae" exitCode=0 Nov 22 09:55:47 crc kubenswrapper[4693]: I1122 09:55:47.749085 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" event={"ID":"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af","Type":"ContainerDied","Data":"dca204a8415045f410b68e0eff5fbb141d2e38be69e030c9df25dea4272425ae"} Nov 22 09:55:47 crc kubenswrapper[4693]: I1122 09:55:47.784407 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-lrtb4"] Nov 22 09:55:47 crc kubenswrapper[4693]: I1122 09:55:47.791011 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qffbp/crc-debug-lrtb4"] Nov 22 09:55:48 crc kubenswrapper[4693]: I1122 09:55:48.838522 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:48 crc kubenswrapper[4693]: I1122 09:55:48.905753 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host\") pod \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " Nov 22 09:55:48 crc kubenswrapper[4693]: I1122 09:55:48.905812 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x989d\" (UniqueName: \"kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d\") pod \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\" (UID: \"5a871b06-5ad1-47bf-bdbc-a484ecf8b4af\") " Nov 22 09:55:48 crc kubenswrapper[4693]: I1122 09:55:48.906663 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host" (OuterVolumeSpecName: "host") pod "5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" (UID: "5a871b06-5ad1-47bf-bdbc-a484ecf8b4af"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 22 09:55:48 crc kubenswrapper[4693]: I1122 09:55:48.911533 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d" (OuterVolumeSpecName: "kube-api-access-x989d") pod "5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" (UID: "5a871b06-5ad1-47bf-bdbc-a484ecf8b4af"). InnerVolumeSpecName "kube-api-access-x989d". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:55:49 crc kubenswrapper[4693]: I1122 09:55:49.007272 4693 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-host\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:49 crc kubenswrapper[4693]: I1122 09:55:49.007296 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x989d\" (UniqueName: \"kubernetes.io/projected/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af-kube-api-access-x989d\") on node \"crc\" DevicePath \"\"" Nov 22 09:55:49 crc kubenswrapper[4693]: I1122 09:55:49.775926 4693 scope.go:117] "RemoveContainer" containerID="dca204a8415045f410b68e0eff5fbb141d2e38be69e030c9df25dea4272425ae" Nov 22 09:55:49 crc kubenswrapper[4693]: I1122 09:55:49.776304 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-qffbp/crc-debug-lrtb4" Nov 22 09:55:50 crc kubenswrapper[4693]: I1122 09:55:50.155628 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" path="/var/lib/kubelet/pods/5a871b06-5ad1-47bf-bdbc-a484ecf8b4af/volumes" Nov 22 09:55:55 crc kubenswrapper[4693]: I1122 09:55:55.147483 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:55:55 crc kubenswrapper[4693]: E1122 09:55:55.148216 4693 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-scx6r_openshift-machine-config-operator(7007d901-fc52-4723-a949-db71619b3305)\"" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" Nov 22 09:56:08 crc kubenswrapper[4693]: I1122 09:56:08.147282 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 09:56:08 crc kubenswrapper[4693]: I1122 09:56:08.960292 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"79aed3d107804d7115879643c78a9a6b579059fa4c496c597a5d78b7f00bfa93"} Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.624880 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6875cccfb6-frgx5_01894220-3ce0-4535-9d19-95f573987428/barbican-api/0.log" Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.730416 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-6875cccfb6-frgx5_01894220-3ce0-4535-9d19-95f573987428/barbican-api-log/0.log" Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.780412 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_barbican-keystone-listener-64777f45c8-bwpgz_2ea61845-fe45-4eed-a854-92545e309870/barbican-keystone-listener/0.log" Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.812327 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-64777f45c8-bwpgz_2ea61845-fe45-4eed-a854-92545e309870/barbican-keystone-listener-log/0.log" Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.951550 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f9b77d547-xf85h_1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40/barbican-worker/0.log" Nov 22 09:56:09 crc kubenswrapper[4693]: I1122 09:56:09.965065 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-6f9b77d547-xf85h_1cd57141-9a7a-4b43-aa1e-c1fcbd6b4c40/barbican-worker-log/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.153127 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/ceilometer-central-agent/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.222737 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-s6lj2_3854e608-0001-4cad-bf75-68cfa7a1486f/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.223532 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/ceilometer-notification-agent/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.335126 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/proxy-httpd/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.362561 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_ee195f08-f515-47ee-bdc0-34d1396136be/sg-core/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.412896 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ab074cff-55c1-4cc2-ac57-05c7948418c0/cinder-api/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.484618 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_ab074cff-55c1-4cc2-ac57-05c7948418c0/cinder-api-log/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.633617 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5284f375-48e8-4caa-a5f9-fa762001ce69/cinder-scheduler/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.687989 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_5284f375-48e8-4caa-a5f9-fa762001ce69/probe/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.834015 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-5ljv8_a63528a4-99a1-4adc-8947-d914a74e4d8b/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.854959 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-lvpf7_7bd4f766-0c89-4aed-9f9a-15657c6e1efa/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:10 crc kubenswrapper[4693]: I1122 09:56:10.975678 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/init/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.141040 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/init/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.199432 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-5d5cf5b645-s8pnc_ccb21e39-74b5-4a8f-ad6a-8c1dede8e334/dnsmasq-dns/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.250246 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-dqlcn_ac0f3797-4b0b-4b88-8624-95e289cf2386/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.385985 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_722dcbf6-ae9d-444d-9839-24b0b0e900db/glance-httpd/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.416119 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_722dcbf6-ae9d-444d-9839-24b0b0e900db/glance-log/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.552227 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2586c0ae-8b14-49b8-8787-d14da284b596/glance-log/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.588817 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_2586c0ae-8b14-49b8-8787-d14da284b596/glance-httpd/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.710556 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7fd67558f8-nzmxr_dd3b9780-060c-4788-9800-20c1ac3b2e95/horizon/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.900887 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-qfzj5_118d3e39-2ce7-4a88-b9c7-869e5e83a568/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:11 crc kubenswrapper[4693]: I1122 09:56:11.960326 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-7fd67558f8-nzmxr_dd3b9780-060c-4788-9800-20c1ac3b2e95/horizon-log/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.049814 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-lwtlp_2a20394f-6ab1-4de1-aa64-df5a655364bb/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.233151 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-5947459fbf-s5xqj_5d2c816b-f30a-4a96-8f72-5f023d95e3be/keystone-api/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.235096 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_ff38a410-4f38-40dc-afa6-2d2cca6054ca/kube-state-metrics/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.348018 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-ckn7m_537a26f3-39a2-48e4-af18-39d0e944c4b0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.593330 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_neutron-7fbc84ccfc-8tdp6_bd35a927-864c-4718-891f-1a036a99ddfb/neutron-api/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.612610 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7fbc84ccfc-8tdp6_bd35a927-864c-4718-891f-1a036a99ddfb/neutron-httpd/0.log" Nov 22 09:56:12 crc kubenswrapper[4693]: I1122 09:56:12.884959 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-6r7tt_ecdf1a42-8112-4bcc-b356-e364d56b93bb/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.260043 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5079844b-9574-4583-97b2-8232271e5681/nova-api-log/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.442059 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_a4da75dc-7806-41ea-9fdd-d7ed1ca362d3/nova-cell0-conductor-conductor/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.629167 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_ca2d255f-4890-4fa9-85e7-47ab34607956/nova-cell1-conductor-conductor/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.645575 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_5079844b-9574-4583-97b2-8232271e5681/nova-api-api/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.722669 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-novncproxy-0_7135aae5-46b3-4654-aba4-70a6f2df0d1e/nova-cell1-novncproxy-novncproxy/0.log" Nov 22 09:56:13 crc kubenswrapper[4693]: I1122 09:56:13.873253 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-vt57h_75221b44-0170-4231-b768-ad88de26addb/nova-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.006122 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1569b6f7-3def-4eb6-87e7-5705b74b1fed/nova-metadata-log/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.253683 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/mysql-bootstrap/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.343290 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_d0427665-286b-4058-bec9-917d31c200e3/nova-scheduler-scheduler/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.432197 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/mysql-bootstrap/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.492192 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_957b6aef-f771-4011-b712-e53794ad836a/galera/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.623118 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/mysql-bootstrap/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.811200 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/galera/0.log" Nov 22 09:56:14 crc 
kubenswrapper[4693]: I1122 09:56:14.860669 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_1d64e5e3-8006-4833-a05d-705799403cc2/mysql-bootstrap/0.log" Nov 22 09:56:14 crc kubenswrapper[4693]: I1122 09:56:14.977417 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_6f3c6ba6-cbba-4d15-9bdc-ce4aedc44a3d/openstackclient/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.007220 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_1569b6f7-3def-4eb6-87e7-5705b74b1fed/nova-metadata-metadata/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.031479 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-czsjw_6ebf2de1-2769-47bc-8136-4ff7460b89b1/ovn-controller/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.210095 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-h8klh_9f9e4335-a9aa-4a2c-8300-25680a90ab8a/openstack-network-exporter/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.369012 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server-init/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.463293 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server-init/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.489460 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovsdb-server/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.494215 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-xk5h2_da762aee-c526-4bbb-a724-9135350b6528/ovs-vswitchd/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.703645 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7728d59-f897-43e3-a7d6-7d1704f41739/openstack-network-exporter/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.709035 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-gtdvj_e06049f5-cf35-4685-9cae-cf2c1cfa2dda/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.853454 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_f7728d59-f897-43e3-a7d6-7d1704f41739/ovn-northd/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.971492 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_645f7714-f41d-4ece-85ef-8043bc2ca51d/openstack-network-exporter/0.log" Nov 22 09:56:15 crc kubenswrapper[4693]: I1122 09:56:15.997908 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_645f7714-f41d-4ece-85ef-8043bc2ca51d/ovsdbserver-nb/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.327766 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_db7c316e-a7ee-4c1a-a663-b02279df3b3e/openstack-network-exporter/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.339441 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_db7c316e-a7ee-4c1a-a663-b02279df3b3e/ovsdbserver-sb/0.log" Nov 22 09:56:16 
crc kubenswrapper[4693]: I1122 09:56:16.503059 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-9f6899d66-t5sml_5b49963a-32e1-4500-969a-b7feaa78d4d3/placement-api/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.621802 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-9f6899d66-t5sml_5b49963a-32e1-4500-969a-b7feaa78d4d3/placement-log/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.628737 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/setup-container/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.780811 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/setup-container/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.820010 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/setup-container/0.log" Nov 22 09:56:16 crc kubenswrapper[4693]: I1122 09:56:16.895306 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_b0753e7f-679e-4da7-a765-d1d220684511/rabbitmq/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.025332 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/setup-container/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.079230 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-mbd4t_b32380c7-b430-47aa-8694-054df2442f2b/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.089486 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_95a8e4f8-d504-40f1-8137-34a70c82e9cb/rabbitmq/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.230344 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-kblb8_7dd57e37-dbd8-422b-9ce9-ba054526ddd3/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.349017 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-wsk4r_7529ff10-b67c-4bd3-aa41-46de267c73f3/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.455420 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-lxlhk_da9ee799-029a-485c-a5b6-bbdc64697c71/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.594726 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-fd2vc_12096b4e-7f75-4fcb-be29-331232a1e64b/ssh-known-hosts-edpm-deployment/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.764015 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-795d6c4bbf-rtw6h_51c40064-56a3-4186-bf60-6181686b256d/proxy-httpd/0.log" Nov 22 09:56:17 crc kubenswrapper[4693]: I1122 09:56:17.794835 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-795d6c4bbf-rtw6h_51c40064-56a3-4186-bf60-6181686b256d/proxy-server/0.log" Nov 22 09:56:17 crc 
kubenswrapper[4693]: I1122 09:56:17.945225 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-ring-rebalance-p9zqf_23436668-ec00-4623-b854-aec77bf25158/swift-ring-rebalance/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.026812 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-auditor/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.072736 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-reaper/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.174679 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-replicator/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.213621 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/account-server/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.242073 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-auditor/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.292244 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-replicator/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.352656 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-server/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.377695 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/container-updater/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.467634 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-auditor/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.491603 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-expirer/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.547374 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-replicator/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.587534 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-server/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.681415 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/rsync/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.713683 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/object-updater/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.737616 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_50bf6e78-87b2-416e-89b5-fa163645a184/swift-recon-cron/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.935533 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_tempest-tests-tempest_fe7aa61b-ddcb-48b3-9d95-c203790e13e5/tempest-tests-tempest-tests-runner/0.log" Nov 22 09:56:18 crc kubenswrapper[4693]: I1122 09:56:18.945733 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_telemetry-edpm-deployment-openstack-edpm-ipam-hwmxz_021fbba7-764a-4284-a4f1-1b8db668d9fd/telemetry-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:19 crc kubenswrapper[4693]: I1122 09:56:19.147171 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_test-operator-logs-pod-tempest-tempest-tests-tempest_1eef7711-f623-4d45-bf25-449b7cbc4e53/test-operator-logs-container/0.log" Nov 22 09:56:19 crc kubenswrapper[4693]: I1122 09:56:19.175094 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-dr42c_a19ea96b-8910-41e1-a8c6-901206473d72/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Nov 22 09:56:28 crc kubenswrapper[4693]: I1122 09:56:28.439200 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_cbaee87f-e8c6-4e56-9b59-c0f50054c172/memcached/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.662376 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.759445 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.811745 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.813247 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.928914 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/pull/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.946056 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/util/0.log" Nov 22 09:56:40 crc kubenswrapper[4693]: I1122 09:56:40.948612 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_1ca9b138781dcf125934bc878376abf75f877c2252ee8cf8f3500b7287wrmr6_ddd4c776-1709-4074-ac2f-8c9f37aa52f1/extract/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.090388 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7768f8c84f-c42f7_f1bb1578-9697-4968-b36f-b77d228fafaa/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.158366 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-7768f8c84f-c42f7_f1bb1578-9697-4968-b36f-b77d228fafaa/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 
09:56:41.183471 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6d8fd67bf7-lz798_cadb4705-8655-4f69-b00f-049e64a71b28/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.312140 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-6d8fd67bf7-lz798_cadb4705-8655-4f69-b00f-049e64a71b28/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.354624 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-56dfb6b67f-r7t48_390da39a-b184-4348-9894-af8f4237aba8/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.356597 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-56dfb6b67f-r7t48_390da39a-b184-4348-9894-af8f4237aba8/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.473832 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8667fbf6f6-xh2cl_60c907e3-d225-414c-a15c-6f0a6999eb9d/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.587296 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-8667fbf6f6-xh2cl_60c907e3-d225-414c-a15c-6f0a6999eb9d/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.609132 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-bf4c6585d-vts55_0d573b45-216b-4869-96f6-c460bb7ff10f/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.686211 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-bf4c6585d-vts55_0d573b45-216b-4869-96f6-c460bb7ff10f/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.751418 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d86b44686-j6jzp_e1cfaae9-e5b8-4826-9e34-6fce5657c237/kube-rbac-proxy/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.795978 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-5d86b44686-j6jzp_e1cfaae9-e5b8-4826-9e34-6fce5657c237/manager/0.log" Nov 22 09:56:41 crc kubenswrapper[4693]: I1122 09:56:41.890803 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-769d9c7585-9g6fk_4b168504-e6a6-48c2-a8af-dc6a44c77e59/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.055812 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-769d9c7585-9g6fk_4b168504-e6a6-48c2-a8af-dc6a44c77e59/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.060234 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5c75d7c94b-92vqr_23476d93-a604-4bc2-9e83-5c59e574436c/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.079690 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5c75d7c94b-92vqr_23476d93-a604-4bc2-9e83-5c59e574436c/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc 
kubenswrapper[4693]: I1122 09:56:42.214388 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7879fb76fd-5xnb4_703d454c-2336-4941-8fe9-5b717f57423f/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.270603 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-7879fb76fd-5xnb4_703d454c-2336-4941-8fe9-5b717f57423f/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.390730 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7bb88cb858-gm5p8_9ea19f26-5477-43e9-84a2-1b8cf72f4f81/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.421607 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-7bb88cb858-gm5p8_9ea19f26-5477-43e9-84a2-1b8cf72f4f81/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.466298 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6f8c5b86cb-t6hdc_9acf7acc-6712-4fbc-ab1c-14a9e1076ab8/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.627961 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-6f8c5b86cb-t6hdc_9acf7acc-6712-4fbc-ab1c-14a9e1076ab8/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.638986 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-66b7d6f598-wd7tg_ed8b2ca0-7928-41f6-8e30-787058fa0808/kube-rbac-proxy/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.713511 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-66b7d6f598-wd7tg_ed8b2ca0-7928-41f6-8e30-787058fa0808/manager/0.log" Nov 22 09:56:42 crc kubenswrapper[4693]: I1122 09:56:42.966450 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-86d796d84d-k8mcb_750a5fd2-0554-4a9d-a16b-9e82cb56694f/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.032099 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-86d796d84d-k8mcb_750a5fd2-0554-4a9d-a16b-9e82cb56694f/manager/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.140922 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6fdc856c5d-q68sw_6bad4eeb-497a-4459-af08-c6f1db9ee8bf/manager/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.156674 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-6fdc856c5d-q68sw_6bad4eeb-497a-4459-af08-c6f1db9ee8bf/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.223608 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4_a100268d-89c1-412b-82a6-843711bcb44b/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.322945 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-79d88dcd44ggrh4_a100268d-89c1-412b-82a6-843711bcb44b/manager/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.407730 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6cb9dc54f8-qv89g_261611c7-97d3-444c-bba5-e06e1593a5e4/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.579081 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-8486c7f98b-hntsn_13a826c8-8c21-452b-80d9-237f609a62a5/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.772206 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-8486c7f98b-hntsn_13a826c8-8c21-452b-80d9-237f609a62a5/operator/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.815669 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-5zdsq_932a4450-e8f1-4d96-acf5-1249c1f7cb07/registry-server/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.853556 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5bdf4f7f7f-h6k74_bbbb8d7a-ee06-4f2e-9982-97b6ee86801d/kube-rbac-proxy/0.log" Nov 22 09:56:43 crc kubenswrapper[4693]: I1122 09:56:43.987488 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-5bdf4f7f7f-h6k74_bbbb8d7a-ee06-4f2e-9982-97b6ee86801d/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.008693 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-6dc664666c-mqzzr_bbb5f842-47da-40fc-a082-50323f1f10f8/kube-rbac-proxy/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.072180 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-6dc664666c-mqzzr_bbb5f842-47da-40fc-a082-50323f1f10f8/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.239442 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-zhzhn_d7bbf03f-0d49-4144-8ea9-0303a2e5c86e/operator/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.283609 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-799cb6ffd6-vnrh2_89938523-c1a9-4f6e-aebb-396a3cd509c6/kube-rbac-proxy/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.430994 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-6cb9dc54f8-qv89g_261611c7-97d3-444c-bba5-e06e1593a5e4/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.433580 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-799cb6ffd6-vnrh2_89938523-c1a9-4f6e-aebb-396a3cd509c6/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.460527 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-7798859c74-kz8xk_1b962230-a413-4f39-a8c8-bed04c898724/kube-rbac-proxy/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.496510 4693 log.go:25] 
"Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-7798859c74-kz8xk_1b962230-a413-4f39-a8c8-bed04c898724/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.599812 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-8464cf66df-cl5qf_3baa8fe0-513d-4a42-a83a-4cc6fbf0e938/kube-rbac-proxy/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.613336 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-8464cf66df-cl5qf_3baa8fe0-513d-4a42-a83a-4cc6fbf0e938/manager/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.689263 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7cd4fb6f79-298gp_1f579f14-5558-45aa-9fa5-da9ee0ccac02/kube-rbac-proxy/0.log" Nov 22 09:56:44 crc kubenswrapper[4693]: I1122 09:56:44.787356 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-7cd4fb6f79-298gp_1f579f14-5558-45aa-9fa5-da9ee0ccac02/manager/0.log" Nov 22 09:56:58 crc kubenswrapper[4693]: I1122 09:56:58.212878 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-gqsrq_f6d86049-ce66-4900-bc42-b1ac6864e79a/control-plane-machine-set-operator/0.log" Nov 22 09:56:58 crc kubenswrapper[4693]: I1122 09:56:58.353392 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-chjcb_56f55ada-5668-4a07-888e-1c578214f660/kube-rbac-proxy/0.log" Nov 22 09:56:58 crc kubenswrapper[4693]: I1122 09:56:58.387750 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-chjcb_56f55ada-5668-4a07-888e-1c578214f660/machine-api-operator/0.log" Nov 22 09:57:08 crc kubenswrapper[4693]: I1122 09:57:08.842402 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-tr5jb_58fe0427-fbdb-40ea-9b6e-80f09e215015/cert-manager-controller/0.log" Nov 22 09:57:08 crc kubenswrapper[4693]: I1122 09:57:08.936039 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-pb9xc_f5108d4d-c652-4cba-8492-281beb13ce46/cert-manager-cainjector/0.log" Nov 22 09:57:08 crc kubenswrapper[4693]: I1122 09:57:08.973704 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-w2r5d_3827b7dd-1354-484e-8430-daec9b09d589/cert-manager-webhook/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.172718 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-5874bd7bc5-l4gbq_b1a9c594-5414-44e7-a2ae-e1bd9fca29a3/nmstate-console-plugin/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.342032 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-5vqxd_cebd96db-0d01-4c74-a89f-d07b10c6fab8/nmstate-handler/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.396506 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rkrtz_9c890510-4900-4b57-a97e-c15267309d74/kube-rbac-proxy/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.452387 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-5dcf9c57c5-rkrtz_9c890510-4900-4b57-a97e-c15267309d74/nmstate-metrics/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.510029 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-557fdffb88-htlwb_ff3958b0-cd30-4470-af03-214de7183eca/nmstate-operator/0.log" Nov 22 09:57:19 crc kubenswrapper[4693]: I1122 09:57:19.604488 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6b89b748d8-w59dj_e808ce78-c21f-414a-84cc-0f9b6e6154aa/nmstate-webhook/0.log" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.000074 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:22 crc kubenswrapper[4693]: E1122 09:57:22.000504 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" containerName="container-00" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.000518 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" containerName="container-00" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.000759 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="5a871b06-5ad1-47bf-bdbc-a484ecf8b4af" containerName="container-00" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.002264 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.015364 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.063500 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvdm2\" (UniqueName: \"kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.063914 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.064033 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.167563 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.167658 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.167799 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvdm2\" (UniqueName: \"kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.168127 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.168477 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.185045 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvdm2\" (UniqueName: \"kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2\") pod \"certified-operators-w6z9s\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.320329 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:22 crc kubenswrapper[4693]: I1122 09:57:22.653652 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:23 crc kubenswrapper[4693]: I1122 09:57:23.601819 4693 generic.go:334] "Generic (PLEG): container finished" podID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerID="fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7" exitCode=0 Nov 22 09:57:23 crc kubenswrapper[4693]: I1122 09:57:23.602000 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerDied","Data":"fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7"} Nov 22 09:57:23 crc kubenswrapper[4693]: I1122 09:57:23.602432 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerStarted","Data":"3eb2b0b112c2620499e95625437e544fe91ba2f2b22703834cc2ee98c629106a"} Nov 22 09:57:23 crc kubenswrapper[4693]: I1122 09:57:23.606163 4693 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 22 09:57:24 crc kubenswrapper[4693]: I1122 09:57:24.610925 4693 generic.go:334] "Generic (PLEG): container finished" podID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerID="8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0" exitCode=0 Nov 22 09:57:24 crc kubenswrapper[4693]: I1122 09:57:24.610988 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerDied","Data":"8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0"} Nov 22 09:57:24 crc kubenswrapper[4693]: E1122 09:57:24.694704 4693 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod85681287_5674_4ef3_8602_ca9ca0e74d5d.slice/crio-8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0.scope\": RecentStats: unable to find data in memory cache]" Nov 22 09:57:25 crc kubenswrapper[4693]: I1122 09:57:25.619370 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerStarted","Data":"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba"} Nov 22 09:57:25 crc kubenswrapper[4693]: I1122 09:57:25.640018 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-w6z9s" podStartSLOduration=3.164302511 podStartE2EDuration="4.64000164s" podCreationTimestamp="2025-11-22 09:57:21 +0000 UTC" firstStartedPulling="2025-11-22 09:57:23.605916765 +0000 UTC m=+3239.748419056" lastFinishedPulling="2025-11-22 09:57:25.081615894 +0000 UTC m=+3241.224118185" observedRunningTime="2025-11-22 09:57:25.631782246 +0000 UTC m=+3241.774284537" watchObservedRunningTime="2025-11-22 09:57:25.64000164 +0000 UTC m=+3241.782503932" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.271889 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-j5p4z_a3aed62f-5abf-4446-9c61-1618025ddc52/kube-rbac-proxy/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 
09:57:32.320415 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.321657 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.357316 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.368150 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6c7b4b5f48-j5p4z_a3aed62f-5abf-4446-9c61-1618025ddc52/controller/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.487340 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.635879 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.636291 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.672588 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.682224 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.715803 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.762776 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.838238 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.871739 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.882333 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:57:32 crc kubenswrapper[4693]: I1122 09:57:32.927714 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.077378 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-metrics/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.099878 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-reloader/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.099897 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/cp-frr-files/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.122734 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/controller/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.266755 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/frr-metrics/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.300717 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/kube-rbac-proxy/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.308481 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/kube-rbac-proxy-frr/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.497127 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/reloader/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.574198 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-6998585d5-dlqvh_d3923021-6731-4289-a3ce-f78f990d6d61/frr-k8s-webhook-server/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.775947 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-688d9f6dd-24fdn_04bf9ebb-5541-4144-879a-1ac25382249d/manager/0.log" Nov 22 09:57:33 crc kubenswrapper[4693]: I1122 09:57:33.907596 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-df9cf6-744f8_d60d2419-520e-4156-9d05-3b174542f80e/webhook-server/0.log" Nov 22 09:57:34 crc kubenswrapper[4693]: I1122 09:57:34.065503 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5gdzr_a9bc621d-a044-4e19-99d8-297dd534f390/kube-rbac-proxy/0.log" Nov 22 09:57:34 crc kubenswrapper[4693]: I1122 09:57:34.447469 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-rps7r_22957d7b-20a3-4b2d-af88-d0a93924eec8/frr/0.log" Nov 22 09:57:34 crc kubenswrapper[4693]: I1122 09:57:34.517406 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-5gdzr_a9bc621d-a044-4e19-99d8-297dd534f390/speaker/0.log" Nov 22 09:57:34 crc kubenswrapper[4693]: I1122 09:57:34.695353 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-w6z9s" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="registry-server" containerID="cri-o://e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba" gracePeriod=2 Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.596147 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.714113 4693 generic.go:334] "Generic (PLEG): container finished" podID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerID="e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba" exitCode=0 Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.714165 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerDied","Data":"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba"} Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.714188 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-w6z9s" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.714218 4693 scope.go:117] "RemoveContainer" containerID="e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.714199 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-w6z9s" event={"ID":"85681287-5674-4ef3-8602-ca9ca0e74d5d","Type":"ContainerDied","Data":"3eb2b0b112c2620499e95625437e544fe91ba2f2b22703834cc2ee98c629106a"} Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.728303 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvdm2\" (UniqueName: \"kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2\") pod \"85681287-5674-4ef3-8602-ca9ca0e74d5d\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.728540 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities\") pod \"85681287-5674-4ef3-8602-ca9ca0e74d5d\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.728768 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content\") pod \"85681287-5674-4ef3-8602-ca9ca0e74d5d\" (UID: \"85681287-5674-4ef3-8602-ca9ca0e74d5d\") " Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.729507 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities" (OuterVolumeSpecName: "utilities") pod "85681287-5674-4ef3-8602-ca9ca0e74d5d" (UID: "85681287-5674-4ef3-8602-ca9ca0e74d5d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.734169 4693 scope.go:117] "RemoveContainer" containerID="8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.735377 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2" (OuterVolumeSpecName: "kube-api-access-pvdm2") pod "85681287-5674-4ef3-8602-ca9ca0e74d5d" (UID: "85681287-5674-4ef3-8602-ca9ca0e74d5d"). InnerVolumeSpecName "kube-api-access-pvdm2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.773141 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "85681287-5674-4ef3-8602-ca9ca0e74d5d" (UID: "85681287-5674-4ef3-8602-ca9ca0e74d5d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.796349 4693 scope.go:117] "RemoveContainer" containerID="fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.832145 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.832252 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvdm2\" (UniqueName: \"kubernetes.io/projected/85681287-5674-4ef3-8602-ca9ca0e74d5d-kube-api-access-pvdm2\") on node \"crc\" DevicePath \"\"" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.832327 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/85681287-5674-4ef3-8602-ca9ca0e74d5d-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.839263 4693 scope.go:117] "RemoveContainer" containerID="e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba" Nov 22 09:57:35 crc kubenswrapper[4693]: E1122 09:57:35.839740 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba\": container with ID starting with e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba not found: ID does not exist" containerID="e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.839783 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba"} err="failed to get container status \"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba\": rpc error: code = NotFound desc = could not find container \"e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba\": container with ID starting with e0ab0f7e352edaf5b4e71d99b778a668de760de509980a6bc2ad9eff858abcba not found: ID does not exist" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.839817 4693 scope.go:117] "RemoveContainer" containerID="8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0" Nov 22 09:57:35 crc kubenswrapper[4693]: E1122 09:57:35.841604 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0\": container with ID starting with 8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0 not found: ID does not exist" containerID="8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.841650 4693 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0"} err="failed to get container status \"8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0\": rpc error: code = NotFound desc = could not find container \"8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0\": container with ID starting with 8bda78a07a614dd55e00929f0994d965c9c4821b02cb287248d7a8801dd972c0 not found: ID does not exist" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.841682 4693 scope.go:117] "RemoveContainer" containerID="fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7" Nov 22 09:57:35 crc kubenswrapper[4693]: E1122 09:57:35.842107 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7\": container with ID starting with fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7 not found: ID does not exist" containerID="fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7" Nov 22 09:57:35 crc kubenswrapper[4693]: I1122 09:57:35.842138 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7"} err="failed to get container status \"fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7\": rpc error: code = NotFound desc = could not find container \"fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7\": container with ID starting with fabbc97089545baf7a336c001345668135435fe05d2c72304a519bf234666dc7 not found: ID does not exist" Nov 22 09:57:36 crc kubenswrapper[4693]: I1122 09:57:36.047542 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:36 crc kubenswrapper[4693]: I1122 09:57:36.052648 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-w6z9s"] Nov 22 09:57:36 crc kubenswrapper[4693]: I1122 09:57:36.159492 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" path="/var/lib/kubelet/pods/85681287-5674-4ef3-8602-ca9ca0e74d5d/volumes" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.437516 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.588019 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.625136 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.668189 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.789027 4693 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/util/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.790015 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/extract/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.790068 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_5c796334424b8139919e908729ac8fe5c1f6e7b6bc33540f00b4f8772ev2kcq_d8efb62f-81ca-419d-aff7-56b948083857/pull/0.log" Nov 22 09:57:45 crc kubenswrapper[4693]: I1122 09:57:45.957708 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.099512 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.101096 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.109314 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.248797 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.261716 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/extract-content/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.452098 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.663011 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-fh447_61a2c2b4-2853-480e-8515-b60a0e915a21/registry-server/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.687530 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.689972 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.715209 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: I1122 09:57:46.868373 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-utilities/0.log" Nov 22 09:57:46 crc kubenswrapper[4693]: 
I1122 09:57:46.870738 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/extract-content/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.088057 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.261158 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-k6mmk_462930d8-4523-48fd-8fb0-fb2a23ff0445/registry-server/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.292378 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.293341 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.342994 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.498259 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/util/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.498353 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/extract/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.514048 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_e8527aae5664f20f24bf3bbb3fd2981ba838928a8a47ce599ee258e4c6snwlg_650bf5b9-0977-4b28-bd9d-8d2518dfc4d7/pull/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.665805 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-mrzgw_651abef9-77a9-4b60-9522-af17781c7a4b/marketplace-operator/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.690570 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.843566 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.846783 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:57:47 crc kubenswrapper[4693]: I1122 09:57:47.870639 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.004865 4693 log.go:25] "Finished parsing 
log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-content/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.005694 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/extract-utilities/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.140461 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-97l4g_98f8038a-33d0-416d-bbb8-54004a3fb6fe/registry-server/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.201732 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.354202 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.363490 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.366464 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.518445 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-utilities/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.536775 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/extract-content/0.log" Nov 22 09:57:48 crc kubenswrapper[4693]: I1122 09:57:48.887298 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-9bbbm_75427b74-79ce-4837-95d0-a5b9b25ac98d/registry-server/0.log" Nov 22 09:58:15 crc kubenswrapper[4693]: E1122 09:58:15.100911 4693 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 192.168.25.249:33818->192.168.25.249:44515: write tcp 192.168.25.249:33818->192.168.25.249:44515: write: broken pipe Nov 22 09:58:30 crc kubenswrapper[4693]: I1122 09:58:30.247146 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:58:30 crc kubenswrapper[4693]: I1122 09:58:30.247817 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.011571 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:39 crc kubenswrapper[4693]: E1122 09:58:39.012423 4693 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="extract-content" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.012436 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="extract-content" Nov 22 09:58:39 crc kubenswrapper[4693]: E1122 09:58:39.012445 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="extract-utilities" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.012450 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="extract-utilities" Nov 22 09:58:39 crc kubenswrapper[4693]: E1122 09:58:39.012474 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="registry-server" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.012479 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="registry-server" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.012636 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="85681287-5674-4ef3-8602-ca9ca0e74d5d" containerName="registry-server" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.013936 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.028052 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.203957 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd5pb\" (UniqueName: \"kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.204029 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.204160 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.307022 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.307721 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd5pb\" (UniqueName: \"kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb\") pod 
\"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.307783 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.308865 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.309412 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.328894 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd5pb\" (UniqueName: \"kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb\") pod \"redhat-marketplace-wb8nv\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.341134 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:39 crc kubenswrapper[4693]: I1122 09:58:39.775960 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:40 crc kubenswrapper[4693]: I1122 09:58:40.315299 4693 generic.go:334] "Generic (PLEG): container finished" podID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerID="4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53" exitCode=0 Nov 22 09:58:40 crc kubenswrapper[4693]: I1122 09:58:40.315663 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerDied","Data":"4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53"} Nov 22 09:58:40 crc kubenswrapper[4693]: I1122 09:58:40.315702 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerStarted","Data":"bc5eb5634a9a50b0d45f6db47d5a9e875993bdee3b55d063a268931f6cf6e99b"} Nov 22 09:58:41 crc kubenswrapper[4693]: I1122 09:58:41.328492 4693 generic.go:334] "Generic (PLEG): container finished" podID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerID="7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406" exitCode=0 Nov 22 09:58:41 crc kubenswrapper[4693]: I1122 09:58:41.328675 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerDied","Data":"7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406"} Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.004331 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.006518 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.012183 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.168932 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m77q5\" (UniqueName: \"kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.169269 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.169304 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.271426 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m77q5\" (UniqueName: \"kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.271505 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.271532 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.272078 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.272208 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.288408 4693 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-m77q5\" (UniqueName: \"kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5\") pod \"community-operators-n57dn\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.323262 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.344120 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerStarted","Data":"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6"} Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.366524 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-wb8nv" podStartSLOduration=2.8791799989999998 podStartE2EDuration="4.366513123s" podCreationTimestamp="2025-11-22 09:58:38 +0000 UTC" firstStartedPulling="2025-11-22 09:58:40.318341942 +0000 UTC m=+3316.460844232" lastFinishedPulling="2025-11-22 09:58:41.805675066 +0000 UTC m=+3317.948177356" observedRunningTime="2025-11-22 09:58:42.362719921 +0000 UTC m=+3318.505222202" watchObservedRunningTime="2025-11-22 09:58:42.366513123 +0000 UTC m=+3318.509015415" Nov 22 09:58:42 crc kubenswrapper[4693]: I1122 09:58:42.850333 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:43 crc kubenswrapper[4693]: I1122 09:58:43.358014 4693 generic.go:334] "Generic (PLEG): container finished" podID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerID="f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938" exitCode=0 Nov 22 09:58:43 crc kubenswrapper[4693]: I1122 09:58:43.358124 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerDied","Data":"f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938"} Nov 22 09:58:43 crc kubenswrapper[4693]: I1122 09:58:43.359475 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerStarted","Data":"cc1e6bf96a85b891e5875f1682abcdb5e48c31165229f88508ea7c9809593bc2"} Nov 22 09:58:44 crc kubenswrapper[4693]: I1122 09:58:44.374582 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerStarted","Data":"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f"} Nov 22 09:58:45 crc kubenswrapper[4693]: I1122 09:58:45.388903 4693 generic.go:334] "Generic (PLEG): container finished" podID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerID="d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f" exitCode=0 Nov 22 09:58:45 crc kubenswrapper[4693]: I1122 09:58:45.388984 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerDied","Data":"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f"} Nov 22 09:58:45 crc kubenswrapper[4693]: I1122 09:58:45.389265 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerStarted","Data":"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc"} Nov 22 09:58:45 crc kubenswrapper[4693]: I1122 09:58:45.412299 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-n57dn" podStartSLOduration=2.914251655 podStartE2EDuration="4.412248328s" podCreationTimestamp="2025-11-22 09:58:41 +0000 UTC" firstStartedPulling="2025-11-22 09:58:43.359769457 +0000 UTC m=+3319.502271749" lastFinishedPulling="2025-11-22 09:58:44.857766132 +0000 UTC m=+3321.000268422" observedRunningTime="2025-11-22 09:58:45.404295275 +0000 UTC m=+3321.546797556" watchObservedRunningTime="2025-11-22 09:58:45.412248328 +0000 UTC m=+3321.554750620" Nov 22 09:58:49 crc kubenswrapper[4693]: I1122 09:58:49.341564 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:49 crc kubenswrapper[4693]: I1122 09:58:49.342095 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:49 crc kubenswrapper[4693]: I1122 09:58:49.382960 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:49 crc kubenswrapper[4693]: I1122 09:58:49.467967 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:49 crc kubenswrapper[4693]: I1122 09:58:49.618602 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.445019 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-wb8nv" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="registry-server" containerID="cri-o://4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6" gracePeriod=2 Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.839864 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.888455 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content\") pod \"0501e6c6-6b13-40cf-a657-d20e713365c9\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.888539 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd5pb\" (UniqueName: \"kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb\") pod \"0501e6c6-6b13-40cf-a657-d20e713365c9\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.888590 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities\") pod \"0501e6c6-6b13-40cf-a657-d20e713365c9\" (UID: \"0501e6c6-6b13-40cf-a657-d20e713365c9\") " Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.889467 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities" (OuterVolumeSpecName: "utilities") pod "0501e6c6-6b13-40cf-a657-d20e713365c9" (UID: "0501e6c6-6b13-40cf-a657-d20e713365c9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.889991 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.894247 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb" (OuterVolumeSpecName: "kube-api-access-hd5pb") pod "0501e6c6-6b13-40cf-a657-d20e713365c9" (UID: "0501e6c6-6b13-40cf-a657-d20e713365c9"). InnerVolumeSpecName "kube-api-access-hd5pb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.903590 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0501e6c6-6b13-40cf-a657-d20e713365c9" (UID: "0501e6c6-6b13-40cf-a657-d20e713365c9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.992773 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0501e6c6-6b13-40cf-a657-d20e713365c9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:51 crc kubenswrapper[4693]: I1122 09:58:51.992818 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd5pb\" (UniqueName: \"kubernetes.io/projected/0501e6c6-6b13-40cf-a657-d20e713365c9-kube-api-access-hd5pb\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.324116 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.324278 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.365007 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.459322 4693 generic.go:334] "Generic (PLEG): container finished" podID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerID="4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6" exitCode=0 Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.459414 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-wb8nv" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.459439 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerDied","Data":"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6"} Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.460251 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-wb8nv" event={"ID":"0501e6c6-6b13-40cf-a657-d20e713365c9","Type":"ContainerDied","Data":"bc5eb5634a9a50b0d45f6db47d5a9e875993bdee3b55d063a268931f6cf6e99b"} Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.460284 4693 scope.go:117] "RemoveContainer" containerID="4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.483004 4693 scope.go:117] "RemoveContainer" containerID="7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.485540 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.493270 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-wb8nv"] Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.500303 4693 scope.go:117] "RemoveContainer" containerID="4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.506248 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.541367 4693 scope.go:117] "RemoveContainer" containerID="4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6" Nov 22 09:58:52 crc 
kubenswrapper[4693]: E1122 09:58:52.541823 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6\": container with ID starting with 4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6 not found: ID does not exist" containerID="4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.541884 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6"} err="failed to get container status \"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6\": rpc error: code = NotFound desc = could not find container \"4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6\": container with ID starting with 4c6db6cbd6e6cff7820a672ca214e3fa667bd875b3bab4de8b0fe851148d6be6 not found: ID does not exist" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.541917 4693 scope.go:117] "RemoveContainer" containerID="7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406" Nov 22 09:58:52 crc kubenswrapper[4693]: E1122 09:58:52.542260 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406\": container with ID starting with 7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406 not found: ID does not exist" containerID="7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.542300 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406"} err="failed to get container status \"7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406\": rpc error: code = NotFound desc = could not find container \"7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406\": container with ID starting with 7d24cd4338220c1fa79f35b31ebb7dd77fdacaf3e21c5314646600bafa306406 not found: ID does not exist" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.542329 4693 scope.go:117] "RemoveContainer" containerID="4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53" Nov 22 09:58:52 crc kubenswrapper[4693]: E1122 09:58:52.542687 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53\": container with ID starting with 4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53 not found: ID does not exist" containerID="4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53" Nov 22 09:58:52 crc kubenswrapper[4693]: I1122 09:58:52.542719 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53"} err="failed to get container status \"4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53\": rpc error: code = NotFound desc = could not find container \"4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53\": container with ID starting with 4c21d2d40a1a98a103ca2e458f696cb8b9d8c38f2363deb2482e5fa6165dea53 not found: ID does not exist" Nov 22 09:58:53 crc kubenswrapper[4693]: 
I1122 09:58:53.419590 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:54 crc kubenswrapper[4693]: I1122 09:58:54.158359 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" path="/var/lib/kubelet/pods/0501e6c6-6b13-40cf-a657-d20e713365c9/volumes" Nov 22 09:58:54 crc kubenswrapper[4693]: I1122 09:58:54.480378 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-n57dn" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="registry-server" containerID="cri-o://8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc" gracePeriod=2 Nov 22 09:58:54 crc kubenswrapper[4693]: I1122 09:58:54.859351 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.062227 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m77q5\" (UniqueName: \"kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5\") pod \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.062357 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content\") pod \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.062532 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities\") pod \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\" (UID: \"991b6ab1-9f4c-4401-8c8b-fc26e41b8317\") " Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.063170 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities" (OuterVolumeSpecName: "utilities") pod "991b6ab1-9f4c-4401-8c8b-fc26e41b8317" (UID: "991b6ab1-9f4c-4401-8c8b-fc26e41b8317"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.063398 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.068579 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5" (OuterVolumeSpecName: "kube-api-access-m77q5") pod "991b6ab1-9f4c-4401-8c8b-fc26e41b8317" (UID: "991b6ab1-9f4c-4401-8c8b-fc26e41b8317"). InnerVolumeSpecName "kube-api-access-m77q5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.122203 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "991b6ab1-9f4c-4401-8c8b-fc26e41b8317" (UID: "991b6ab1-9f4c-4401-8c8b-fc26e41b8317"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.165986 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m77q5\" (UniqueName: \"kubernetes.io/projected/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-kube-api-access-m77q5\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.166021 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/991b6ab1-9f4c-4401-8c8b-fc26e41b8317-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.491350 4693 generic.go:334] "Generic (PLEG): container finished" podID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerID="8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc" exitCode=0 Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.491414 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerDied","Data":"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc"} Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.491458 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-n57dn" event={"ID":"991b6ab1-9f4c-4401-8c8b-fc26e41b8317","Type":"ContainerDied","Data":"cc1e6bf96a85b891e5875f1682abcdb5e48c31165229f88508ea7c9809593bc2"} Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.491458 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-n57dn" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.491480 4693 scope.go:117] "RemoveContainer" containerID="8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.514352 4693 scope.go:117] "RemoveContainer" containerID="d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.535300 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.541337 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-n57dn"] Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.552676 4693 scope.go:117] "RemoveContainer" containerID="f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.569331 4693 scope.go:117] "RemoveContainer" containerID="8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc" Nov 22 09:58:55 crc kubenswrapper[4693]: E1122 09:58:55.569587 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc\": container with ID starting with 8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc not found: ID does not exist" containerID="8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.569628 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc"} err="failed to get container status 
\"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc\": rpc error: code = NotFound desc = could not find container \"8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc\": container with ID starting with 8d05b32bbd26d8f2535430f3692dffd15ad4aad12ca3d49a01e8d8d407fdc5dc not found: ID does not exist" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.569657 4693 scope.go:117] "RemoveContainer" containerID="d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f" Nov 22 09:58:55 crc kubenswrapper[4693]: E1122 09:58:55.569937 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f\": container with ID starting with d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f not found: ID does not exist" containerID="d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.569977 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f"} err="failed to get container status \"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f\": rpc error: code = NotFound desc = could not find container \"d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f\": container with ID starting with d87bfc729cc679373908a4f7041757da214094d53d9f93531cf16ff6027cdd5f not found: ID does not exist" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.570018 4693 scope.go:117] "RemoveContainer" containerID="f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938" Nov 22 09:58:55 crc kubenswrapper[4693]: E1122 09:58:55.570252 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938\": container with ID starting with f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938 not found: ID does not exist" containerID="f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938" Nov 22 09:58:55 crc kubenswrapper[4693]: I1122 09:58:55.570282 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938"} err="failed to get container status \"f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938\": rpc error: code = NotFound desc = could not find container \"f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938\": container with ID starting with f79ac7854b474da637b6d21685a66aa8cd74542c1a492170d8c666bffe245938 not found: ID does not exist" Nov 22 09:58:56 crc kubenswrapper[4693]: I1122 09:58:56.157630 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" path="/var/lib/kubelet/pods/991b6ab1-9f4c-4401-8c8b-fc26e41b8317/volumes" Nov 22 09:59:00 crc kubenswrapper[4693]: I1122 09:59:00.246280 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:59:00 crc kubenswrapper[4693]: I1122 09:59:00.247328 4693 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:59:10 crc kubenswrapper[4693]: I1122 09:59:10.639753 4693 generic.go:334] "Generic (PLEG): container finished" podID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerID="b585132fd1a566db3b2a032e44254f78b5035bdc9f6910330d0af6e14df00016" exitCode=0 Nov 22 09:59:10 crc kubenswrapper[4693]: I1122 09:59:10.639825 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-qffbp/must-gather-kzwdq" event={"ID":"ccb41945-1c9c-4ee6-a791-e16fdfb849f7","Type":"ContainerDied","Data":"b585132fd1a566db3b2a032e44254f78b5035bdc9f6910330d0af6e14df00016"} Nov 22 09:59:10 crc kubenswrapper[4693]: I1122 09:59:10.642434 4693 scope.go:117] "RemoveContainer" containerID="b585132fd1a566db3b2a032e44254f78b5035bdc9f6910330d0af6e14df00016" Nov 22 09:59:11 crc kubenswrapper[4693]: I1122 09:59:11.207370 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qffbp_must-gather-kzwdq_ccb41945-1c9c-4ee6-a791-e16fdfb849f7/gather/0.log" Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.590762 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-qffbp/must-gather-kzwdq"] Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.591559 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-qffbp/must-gather-kzwdq" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="copy" containerID="cri-o://e7c8bfe1311b37ab89739074062ce069346e1fadee52529c6d6bc71f5c7014c1" gracePeriod=2 Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.597585 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-qffbp/must-gather-kzwdq"] Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.738316 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qffbp_must-gather-kzwdq_ccb41945-1c9c-4ee6-a791-e16fdfb849f7/copy/0.log" Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.738914 4693 generic.go:334] "Generic (PLEG): container finished" podID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerID="e7c8bfe1311b37ab89739074062ce069346e1fadee52529c6d6bc71f5c7014c1" exitCode=143 Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.992773 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qffbp_must-gather-kzwdq_ccb41945-1c9c-4ee6-a791-e16fdfb849f7/copy/0.log" Nov 22 09:59:19 crc kubenswrapper[4693]: I1122 09:59:19.993678 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.114062 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fgpkt\" (UniqueName: \"kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt\") pod \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.114156 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output\") pod \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\" (UID: \"ccb41945-1c9c-4ee6-a791-e16fdfb849f7\") " Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.120581 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt" (OuterVolumeSpecName: "kube-api-access-fgpkt") pod "ccb41945-1c9c-4ee6-a791-e16fdfb849f7" (UID: "ccb41945-1c9c-4ee6-a791-e16fdfb849f7"). InnerVolumeSpecName "kube-api-access-fgpkt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.217936 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fgpkt\" (UniqueName: \"kubernetes.io/projected/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-kube-api-access-fgpkt\") on node \"crc\" DevicePath \"\"" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.235439 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "ccb41945-1c9c-4ee6-a791-e16fdfb849f7" (UID: "ccb41945-1c9c-4ee6-a791-e16fdfb849f7"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.320823 4693 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/ccb41945-1c9c-4ee6-a791-e16fdfb849f7-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.748983 4693 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-qffbp_must-gather-kzwdq_ccb41945-1c9c-4ee6-a791-e16fdfb849f7/copy/0.log" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.749325 4693 scope.go:117] "RemoveContainer" containerID="e7c8bfe1311b37ab89739074062ce069346e1fadee52529c6d6bc71f5c7014c1" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.749462 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-qffbp/must-gather-kzwdq" Nov 22 09:59:20 crc kubenswrapper[4693]: I1122 09:59:20.782730 4693 scope.go:117] "RemoveContainer" containerID="b585132fd1a566db3b2a032e44254f78b5035bdc9f6910330d0af6e14df00016" Nov 22 09:59:22 crc kubenswrapper[4693]: I1122 09:59:22.157603 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" path="/var/lib/kubelet/pods/ccb41945-1c9c-4ee6-a791-e16fdfb849f7/volumes" Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.246427 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.247122 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.247171 4693 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.247726 4693 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"79aed3d107804d7115879643c78a9a6b579059fa4c496c597a5d78b7f00bfa93"} pod="openshift-machine-config-operator/machine-config-daemon-scx6r" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.247790 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" containerID="cri-o://79aed3d107804d7115879643c78a9a6b579059fa4c496c597a5d78b7f00bfa93" gracePeriod=600 Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.839679 4693 generic.go:334] "Generic (PLEG): container finished" podID="7007d901-fc52-4723-a949-db71619b3305" containerID="79aed3d107804d7115879643c78a9a6b579059fa4c496c597a5d78b7f00bfa93" exitCode=0 Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.840276 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerDied","Data":"79aed3d107804d7115879643c78a9a6b579059fa4c496c597a5d78b7f00bfa93"} Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.840354 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" event={"ID":"7007d901-fc52-4723-a949-db71619b3305","Type":"ContainerStarted","Data":"3fba733ff4fd53abc8e4270e149861fc3d864f239ad5870a8582af1c61e9e5e8"} Nov 22 09:59:30 crc kubenswrapper[4693]: I1122 09:59:30.840400 4693 scope.go:117] "RemoveContainer" containerID="268f8c36d9989849e458b69c287c09d2b7bd0ac9f3412c76ed8874750a23ffba" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.157715 4693 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz"] Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158610 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="extract-utilities" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158626 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="extract-utilities" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158646 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158653 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158667 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="gather" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158672 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="gather" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158679 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="extract-utilities" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158687 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="extract-utilities" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158702 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158709 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158727 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="extract-content" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158733 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="extract-content" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158748 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="extract-content" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158753 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="extract-content" Nov 22 10:00:00 crc kubenswrapper[4693]: E1122 10:00:00.158767 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="copy" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.158773 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="copy" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.159024 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="copy" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.159036 4693 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="991b6ab1-9f4c-4401-8c8b-fc26e41b8317" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.159042 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccb41945-1c9c-4ee6-a791-e16fdfb849f7" containerName="gather" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.159055 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="0501e6c6-6b13-40cf-a657-d20e713365c9" containerName="registry-server" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.159786 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.161752 4693 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.163176 4693 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.168374 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz"] Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.322931 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-66ggs\" (UniqueName: \"kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.323255 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.323432 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.425826 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.425900 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.425976 4693 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-66ggs\" (UniqueName: \"kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.428378 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.432993 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.443389 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-66ggs\" (UniqueName: \"kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs\") pod \"collect-profiles-29396760-gqhxz\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.480511 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:00 crc kubenswrapper[4693]: I1122 10:00:00.904386 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz"] Nov 22 10:00:01 crc kubenswrapper[4693]: I1122 10:00:01.125037 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" event={"ID":"f5fb9de3-517d-400e-8b60-d7e6d5b67052","Type":"ContainerStarted","Data":"9d4d457ee549fc248225c064c5f3e3792312fcdbed47231f75785d7ba7188564"} Nov 22 10:00:01 crc kubenswrapper[4693]: I1122 10:00:01.125288 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" event={"ID":"f5fb9de3-517d-400e-8b60-d7e6d5b67052","Type":"ContainerStarted","Data":"758b01bab263a6a28b45b84b3cfe793ca46f0cdf606fa2c3dcb4a408b28e363c"} Nov 22 10:00:01 crc kubenswrapper[4693]: I1122 10:00:01.149577 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" podStartSLOduration=1.149560024 podStartE2EDuration="1.149560024s" podCreationTimestamp="2025-11-22 10:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:00:01.147213312 +0000 UTC m=+3397.289715613" watchObservedRunningTime="2025-11-22 10:00:01.149560024 +0000 UTC m=+3397.292062315" Nov 22 10:00:02 crc kubenswrapper[4693]: I1122 10:00:02.137566 4693 generic.go:334] "Generic (PLEG): container finished" podID="f5fb9de3-517d-400e-8b60-d7e6d5b67052" containerID="9d4d457ee549fc248225c064c5f3e3792312fcdbed47231f75785d7ba7188564" exitCode=0 Nov 22 
10:00:02 crc kubenswrapper[4693]: I1122 10:00:02.137654 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" event={"ID":"f5fb9de3-517d-400e-8b60-d7e6d5b67052","Type":"ContainerDied","Data":"9d4d457ee549fc248225c064c5f3e3792312fcdbed47231f75785d7ba7188564"} Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.448523 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.501425 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-66ggs\" (UniqueName: \"kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs\") pod \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.501643 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume\") pod \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.501703 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume\") pod \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\" (UID: \"f5fb9de3-517d-400e-8b60-d7e6d5b67052\") " Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.502328 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume" (OuterVolumeSpecName: "config-volume") pod "f5fb9de3-517d-400e-8b60-d7e6d5b67052" (UID: "f5fb9de3-517d-400e-8b60-d7e6d5b67052"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.508222 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "f5fb9de3-517d-400e-8b60-d7e6d5b67052" (UID: "f5fb9de3-517d-400e-8b60-d7e6d5b67052"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.511047 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs" (OuterVolumeSpecName: "kube-api-access-66ggs") pod "f5fb9de3-517d-400e-8b60-d7e6d5b67052" (UID: "f5fb9de3-517d-400e-8b60-d7e6d5b67052"). InnerVolumeSpecName "kube-api-access-66ggs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.604965 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-66ggs\" (UniqueName: \"kubernetes.io/projected/f5fb9de3-517d-400e-8b60-d7e6d5b67052-kube-api-access-66ggs\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.605001 4693 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/f5fb9de3-517d-400e-8b60-d7e6d5b67052-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:03 crc kubenswrapper[4693]: I1122 10:00:03.605022 4693 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/f5fb9de3-517d-400e-8b60-d7e6d5b67052-config-volume\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:04 crc kubenswrapper[4693]: I1122 10:00:04.157054 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" Nov 22 10:00:04 crc kubenswrapper[4693]: I1122 10:00:04.158434 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29396760-gqhxz" event={"ID":"f5fb9de3-517d-400e-8b60-d7e6d5b67052","Type":"ContainerDied","Data":"758b01bab263a6a28b45b84b3cfe793ca46f0cdf606fa2c3dcb4a408b28e363c"} Nov 22 10:00:04 crc kubenswrapper[4693]: I1122 10:00:04.158484 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="758b01bab263a6a28b45b84b3cfe793ca46f0cdf606fa2c3dcb4a408b28e363c" Nov 22 10:00:04 crc kubenswrapper[4693]: I1122 10:00:04.525714 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9"] Nov 22 10:00:04 crc kubenswrapper[4693]: I1122 10:00:04.532518 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29396715-t8gd9"] Nov 22 10:00:06 crc kubenswrapper[4693]: I1122 10:00:06.159969 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0de4e9b7-704c-4f94-92fb-92faf93a2795" path="/var/lib/kubelet/pods/0de4e9b7-704c-4f94-92fb-92faf93a2795/volumes" Nov 22 10:00:33 crc kubenswrapper[4693]: I1122 10:00:33.840218 4693 scope.go:117] "RemoveContainer" containerID="9bf570ac5f90d891a2fe8276a6fd8bde7592fe6cfd7535ada7b8f29592db2ff1" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.496946 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:35 crc kubenswrapper[4693]: E1122 10:00:35.497611 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f5fb9de3-517d-400e-8b60-d7e6d5b67052" containerName="collect-profiles" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.497631 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="f5fb9de3-517d-400e-8b60-d7e6d5b67052" containerName="collect-profiles" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.497887 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="f5fb9de3-517d-400e-8b60-d7e6d5b67052" containerName="collect-profiles" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.499230 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.508270 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.534932 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.534997 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpkqv\" (UniqueName: \"kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.535130 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.636408 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpkqv\" (UniqueName: \"kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.636490 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.636675 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.637119 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.637154 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.655650 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-tpkqv\" (UniqueName: \"kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv\") pod \"redhat-operators-8pcsd\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:35 crc kubenswrapper[4693]: I1122 10:00:35.816834 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:36 crc kubenswrapper[4693]: I1122 10:00:36.224817 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:36 crc kubenswrapper[4693]: I1122 10:00:36.475957 4693 generic.go:334] "Generic (PLEG): container finished" podID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerID="4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e" exitCode=0 Nov 22 10:00:36 crc kubenswrapper[4693]: I1122 10:00:36.476037 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerDied","Data":"4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e"} Nov 22 10:00:36 crc kubenswrapper[4693]: I1122 10:00:36.476258 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerStarted","Data":"3998a554ebdff2ae1460c5616dcd95b7f3bdc9699be6365945559cca3bddae08"} Nov 22 10:00:37 crc kubenswrapper[4693]: I1122 10:00:37.499804 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerStarted","Data":"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf"} Nov 22 10:00:38 crc kubenswrapper[4693]: I1122 10:00:38.511651 4693 generic.go:334] "Generic (PLEG): container finished" podID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerID="d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf" exitCode=0 Nov 22 10:00:38 crc kubenswrapper[4693]: I1122 10:00:38.511711 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerDied","Data":"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf"} Nov 22 10:00:39 crc kubenswrapper[4693]: I1122 10:00:39.525497 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerStarted","Data":"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97"} Nov 22 10:00:39 crc kubenswrapper[4693]: I1122 10:00:39.547984 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8pcsd" podStartSLOduration=1.9468738110000001 podStartE2EDuration="4.547960534s" podCreationTimestamp="2025-11-22 10:00:35 +0000 UTC" firstStartedPulling="2025-11-22 10:00:36.477438796 +0000 UTC m=+3432.619941087" lastFinishedPulling="2025-11-22 10:00:39.07852552 +0000 UTC m=+3435.221027810" observedRunningTime="2025-11-22 10:00:39.542520554 +0000 UTC m=+3435.685022846" watchObservedRunningTime="2025-11-22 10:00:39.547960534 +0000 UTC m=+3435.690462825" Nov 22 10:00:45 crc kubenswrapper[4693]: I1122 10:00:45.817059 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 
22 10:00:45 crc kubenswrapper[4693]: I1122 10:00:45.817728 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:45 crc kubenswrapper[4693]: I1122 10:00:45.857308 4693 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:46 crc kubenswrapper[4693]: I1122 10:00:46.648867 4693 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:46 crc kubenswrapper[4693]: I1122 10:00:46.691288 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:48 crc kubenswrapper[4693]: I1122 10:00:48.627609 4693 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8pcsd" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="registry-server" containerID="cri-o://1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97" gracePeriod=2 Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.544876 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.636657 4693 generic.go:334] "Generic (PLEG): container finished" podID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerID="1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97" exitCode=0 Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.636711 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerDied","Data":"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97"} Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.636743 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8pcsd" event={"ID":"6dbd5b12-1206-4002-b1ed-4f2df1e37c38","Type":"ContainerDied","Data":"3998a554ebdff2ae1460c5616dcd95b7f3bdc9699be6365945559cca3bddae08"} Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.636762 4693 scope.go:117] "RemoveContainer" containerID="1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.636913 4693 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8pcsd" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.652223 4693 scope.go:117] "RemoveContainer" containerID="d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.666872 4693 scope.go:117] "RemoveContainer" containerID="4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.708802 4693 scope.go:117] "RemoveContainer" containerID="1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97" Nov 22 10:00:49 crc kubenswrapper[4693]: E1122 10:00:49.709427 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97\": container with ID starting with 1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97 not found: ID does not exist" containerID="1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.709475 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97"} err="failed to get container status \"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97\": rpc error: code = NotFound desc = could not find container \"1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97\": container with ID starting with 1f05eba76884b53fb23775e35174b0feaad7a8bde68f7d3c8b95802bc9c4da97 not found: ID does not exist" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.709508 4693 scope.go:117] "RemoveContainer" containerID="d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf" Nov 22 10:00:49 crc kubenswrapper[4693]: E1122 10:00:49.709894 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf\": container with ID starting with d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf not found: ID does not exist" containerID="d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.709946 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf"} err="failed to get container status \"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf\": rpc error: code = NotFound desc = could not find container \"d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf\": container with ID starting with d819dd7b69bd2d1408329b77e017cbf71c2f3bf2bb2d277d7d16b52d352cccdf not found: ID does not exist" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.709976 4693 scope.go:117] "RemoveContainer" containerID="4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e" Nov 22 10:00:49 crc kubenswrapper[4693]: E1122 10:00:49.710288 4693 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e\": container with ID starting with 4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e not found: ID does not exist" containerID="4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e" 
Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.710318 4693 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e"} err="failed to get container status \"4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e\": rpc error: code = NotFound desc = could not find container \"4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e\": container with ID starting with 4083e2aa250d36ddd9979a6baf9c98aaf5dd72d98686ce49a4ec948c0eb0a85e not found: ID does not exist" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.715317 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tpkqv\" (UniqueName: \"kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv\") pod \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.715526 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content\") pod \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.715624 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities\") pod \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\" (UID: \"6dbd5b12-1206-4002-b1ed-4f2df1e37c38\") " Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.716568 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities" (OuterVolumeSpecName: "utilities") pod "6dbd5b12-1206-4002-b1ed-4f2df1e37c38" (UID: "6dbd5b12-1206-4002-b1ed-4f2df1e37c38"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.722041 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv" (OuterVolumeSpecName: "kube-api-access-tpkqv") pod "6dbd5b12-1206-4002-b1ed-4f2df1e37c38" (UID: "6dbd5b12-1206-4002-b1ed-4f2df1e37c38"). InnerVolumeSpecName "kube-api-access-tpkqv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.803760 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "6dbd5b12-1206-4002-b1ed-4f2df1e37c38" (UID: "6dbd5b12-1206-4002-b1ed-4f2df1e37c38"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.818685 4693 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-utilities\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.818716 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tpkqv\" (UniqueName: \"kubernetes.io/projected/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-kube-api-access-tpkqv\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.818729 4693 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/6dbd5b12-1206-4002-b1ed-4f2df1e37c38-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.974690 4693 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:49 crc kubenswrapper[4693]: I1122 10:00:49.986260 4693 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-8pcsd"] Nov 22 10:00:50 crc kubenswrapper[4693]: I1122 10:00:50.158819 4693 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" path="/var/lib/kubelet/pods/6dbd5b12-1206-4002-b1ed-4f2df1e37c38/volumes" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.146350 4693 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29396761-nd4mr"] Nov 22 10:01:00 crc kubenswrapper[4693]: E1122 10:01:00.147346 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="registry-server" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.147363 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="registry-server" Nov 22 10:01:00 crc kubenswrapper[4693]: E1122 10:01:00.147386 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="extract-content" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.147392 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="extract-content" Nov 22 10:01:00 crc kubenswrapper[4693]: E1122 10:01:00.147413 4693 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="extract-utilities" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.147419 4693 state_mem.go:107] "Deleted CPUSet assignment" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="extract-utilities" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.147627 4693 memory_manager.go:354] "RemoveStaleState removing state" podUID="6dbd5b12-1206-4002-b1ed-4f2df1e37c38" containerName="registry-server" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.148959 4693 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.159230 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396761-nd4mr"] Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.326911 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.327066 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tqhlj\" (UniqueName: \"kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.328647 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.328917 4693 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.431082 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.432129 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tqhlj\" (UniqueName: \"kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.432314 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.432403 4693 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.439141 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.439266 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.440873 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.448607 4693 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tqhlj\" (UniqueName: \"kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj\") pod \"keystone-cron-29396761-nd4mr\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") " pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.472454 4693 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396761-nd4mr" Nov 22 10:01:00 crc kubenswrapper[4693]: I1122 10:01:00.886652 4693 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29396761-nd4mr"] Nov 22 10:01:01 crc kubenswrapper[4693]: I1122 10:01:01.753135 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396761-nd4mr" event={"ID":"d9149bb7-d679-4b93-a2bc-a59c96d29817","Type":"ContainerStarted","Data":"1cf9775393972c0e7d5cff3081ac4d8c5b092aa43dc228cf3d6d8f89d10e5906"} Nov 22 10:01:01 crc kubenswrapper[4693]: I1122 10:01:01.753394 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396761-nd4mr" event={"ID":"d9149bb7-d679-4b93-a2bc-a59c96d29817","Type":"ContainerStarted","Data":"cfe6589589eecfef556d2a3a666797b9de63f989f2b53bb8dfb081896c54892e"} Nov 22 10:01:01 crc kubenswrapper[4693]: I1122 10:01:01.773125 4693 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29396761-nd4mr" podStartSLOduration=1.773106893 podStartE2EDuration="1.773106893s" podCreationTimestamp="2025-11-22 10:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-22 10:01:01.765581101 +0000 UTC m=+3457.908083392" watchObservedRunningTime="2025-11-22 10:01:01.773106893 +0000 UTC m=+3457.915609183" Nov 22 10:01:02 crc kubenswrapper[4693]: I1122 10:01:02.764040 4693 generic.go:334] "Generic (PLEG): container finished" podID="d9149bb7-d679-4b93-a2bc-a59c96d29817" containerID="1cf9775393972c0e7d5cff3081ac4d8c5b092aa43dc228cf3d6d8f89d10e5906" exitCode=0 Nov 22 10:01:02 crc kubenswrapper[4693]: I1122 10:01:02.764117 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396761-nd4mr" event={"ID":"d9149bb7-d679-4b93-a2bc-a59c96d29817","Type":"ContainerDied","Data":"1cf9775393972c0e7d5cff3081ac4d8c5b092aa43dc228cf3d6d8f89d10e5906"} Nov 22 10:01:04 crc kubenswrapper[4693]: 
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.026094 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396761-nd4mr"
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.219797 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data\") pod \"d9149bb7-d679-4b93-a2bc-a59c96d29817\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") "
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.219873 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tqhlj\" (UniqueName: \"kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj\") pod \"d9149bb7-d679-4b93-a2bc-a59c96d29817\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") "
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.219913 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys\") pod \"d9149bb7-d679-4b93-a2bc-a59c96d29817\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") "
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.220016 4693 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle\") pod \"d9149bb7-d679-4b93-a2bc-a59c96d29817\" (UID: \"d9149bb7-d679-4b93-a2bc-a59c96d29817\") "
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.226641 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "d9149bb7-d679-4b93-a2bc-a59c96d29817" (UID: "d9149bb7-d679-4b93-a2bc-a59c96d29817"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.240338 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj" (OuterVolumeSpecName: "kube-api-access-tqhlj") pod "d9149bb7-d679-4b93-a2bc-a59c96d29817" (UID: "d9149bb7-d679-4b93-a2bc-a59c96d29817"). InnerVolumeSpecName "kube-api-access-tqhlj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.248239 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9149bb7-d679-4b93-a2bc-a59c96d29817" (UID: "d9149bb7-d679-4b93-a2bc-a59c96d29817"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.270948 4693 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data" (OuterVolumeSpecName: "config-data") pod "d9149bb7-d679-4b93-a2bc-a59c96d29817" (UID: "d9149bb7-d679-4b93-a2bc-a59c96d29817"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.323559 4693 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.323591 4693 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-config-data\") on node \"crc\" DevicePath \"\""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.323602 4693 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tqhlj\" (UniqueName: \"kubernetes.io/projected/d9149bb7-d679-4b93-a2bc-a59c96d29817-kube-api-access-tqhlj\") on node \"crc\" DevicePath \"\""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.323615 4693 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/d9149bb7-d679-4b93-a2bc-a59c96d29817-fernet-keys\") on node \"crc\" DevicePath \"\""
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.784760 4693 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29396761-nd4mr" event={"ID":"d9149bb7-d679-4b93-a2bc-a59c96d29817","Type":"ContainerDied","Data":"cfe6589589eecfef556d2a3a666797b9de63f989f2b53bb8dfb081896c54892e"}
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.784822 4693 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cfe6589589eecfef556d2a3a666797b9de63f989f2b53bb8dfb081896c54892e"
Nov 22 10:01:04 crc kubenswrapper[4693]: I1122 10:01:04.784862 4693 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29396761-nd4mr"
Nov 22 10:01:30 crc kubenswrapper[4693]: I1122 10:01:30.247021 4693 patch_prober.go:28] interesting pod/machine-config-daemon-scx6r container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Nov 22 10:01:30 crc kubenswrapper[4693]: I1122 10:01:30.247810 4693 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-scx6r" podUID="7007d901-fc52-4723-a949-db71619b3305" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"